Diffstat (limited to 'linux-2.6.11-xen-sparse')
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/Kconfig | 60
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 (renamed from linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig) | 79
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 | 995
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 (renamed from linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig) | 29
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 | 520
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/Kconfig | 336
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/Makefile | 1
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/Makefile | 17
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/acpi/Makefile | 13
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/acpi/boot.c | 906
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/apic.c | 83
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c | 1
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S | 84
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S | 3
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/io_apic.c | 2611
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ioport.c | 116
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/irq.c | 297
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ldt.c | 5
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/mpparse.c | 1115
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c | 50
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c | 171
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c | 55
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smp.c | 624
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c | 1437
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/time.c | 109
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/traps.c | 40
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/mach-default/Makefile | 12
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile | 4
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/mm/fault.c | 9
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c | 340
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c | 25
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/mm/ioremap.c | 26
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/mm/pageattr.c | 226
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c | 216
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/pci/Makefile | 7
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/pci/direct.c | 81
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/pci/irq.c | 1050
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/kernel/Makefile | 6
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c | 133
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c | 125
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c | 2
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c | 387
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/kernel/reboot.c | 34
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/kernel/smp.c | 16
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/Kconfig | 456
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/Makefile | 92
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/Makefile | 66
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/asm-offsets.c | 70
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/e820.c | 533
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S | 1181
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head.S | 207
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head64.c | 132
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/init_task.c | 49
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ioport.c | 63
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/irq.c | 105
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ldt.c | 267
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c | 256
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-nommu.c | 96
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c | 713
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup.c | 1379
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup64.c | 346
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/signal.c | 493
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/smp.c | 411
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/smpboot.c | 958
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/traps.c | 974
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/vsyscall.c | 190
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c | 223
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/Makefile | 26
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/fault.c | 591
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c | 275
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c | 935
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/ioremap.c | 466
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/pageattr.c | 247
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/pci/Makefile | 41
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/x86_64/pci/Makefile-BUS | 22
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/acpi/tables.c | 610
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/Makefile | 3
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/balloon/balloon.c | 50
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c | 379
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h | 48
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blkback/control.c | 34
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blkback/interface.c | 23
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c | 446
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c | 526
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blkfront/block.h | 5
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blktap/Makefile | 3
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap.c | 87
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap.h | 253
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_controlmsg.c | 539
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_datapath.c | 453
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_userdev.c | 471
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/console/console.c | 94
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/evtchn/evtchn.c | 17
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/netback/Makefile | 2
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/netback/common.h | 3
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/netback/control.c | 27
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c | 64
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c | 123
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c | 19
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/privcmd/privcmd.c | 59
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/usbback/common.h | 85
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/usbback/control.c | 61
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/usbback/interface.c | 252
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/usbback/usbback.c | 1070
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/usbfront/usbfront.c | 1738
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/usbfront/xhci.h | 183
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/desc.h | 5
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h | 10
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h | 520
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h | 21
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h | 7
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/smpboot_hooks.h | 55
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu.h | 22
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h | 55
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/msr.h | 272
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgalloc.h | 22
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h | 25
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h | 56
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h | 6
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/segment.h | 8
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/spinlock.h | 250
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h | 77
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/tlbflush.h | 9
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/xor.h | 884
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/arch_hooks.h | 27
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/bootsetup.h | 41
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/desc.h | 240
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/dma-mapping.h | 136
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/fixmap.h | 108
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/floppy.h | 204
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h | 505
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/io.h | 365
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/irq.h | 36
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/io_ports.h | 30
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h | 137
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_time.h | 122
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h | 48
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h | 47
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h | 5
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/smpboot_hooks.h | 55
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h | 76
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/page.h | 229
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/param.h | 22
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pci.h | 148
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pda.h | 85
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgalloc.h | 171
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h | 527
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/processor.h | 474
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/ptrace.h | 119
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/segment.h | 47
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/smp.h | 154
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/synch_bitops.h | 85
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/system.h | 395
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/timer.h | 64
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/tlbflush.h | 97
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/vga.h | 20
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/xor.h | 328
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/evtchn.h | 8
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/gnttab.h | 72
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h | 536
-rw-r--r--  linux-2.6.11-xen-sparse/include/asm-xen/multicall.h | 107
-rw-r--r--  linux-2.6.11-xen-sparse/mm/mmap.c | 2108
162 files changed, 38128 insertions, 4200 deletions
diff --git a/linux-2.6.11-xen-sparse/arch/xen/Kconfig b/linux-2.6.11-xen-sparse/arch/xen/Kconfig
index 27eac46739..480c4e8fd1 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/Kconfig
+++ b/linux-2.6.11-xen-sparse/arch/xen/Kconfig
@@ -48,6 +48,28 @@ config XEN_BLKDEV_BACKEND
block devices to other guests via a high-performance shared-memory
interface.
+config XEN_BLKDEV_TAP_BE
+ bool "Block Tap support for backend driver (DANGEROUS)"
+ depends on XEN_BLKDEV_BACKEND
+ default n
+ help
+ If you intend to use the block tap driver, the backend domain will
+ not know the domain id of the real frontend, and so will not be able
+ to map its data pages. This modifies the backend to attempt to map
+ from both the tap domain and the real frontend. This presents a
+ security risk, and so should ONLY be used for development
+ with the blktap. This option will be removed as the block drivers are
+ modified to use grant tables.
+
+config XEN_BLKDEV_GRANT
+ bool "Grant table substrate for block drivers"
+ depends on !XEN_BLKDEV_TAP_BE
+ default y
+ help
+ This introduces the use of grant tables as a data exchange mechanism
+ between the frontend and backend block drivers. This currently
+ conflicts with the block tap.
+
config XEN_NETDEV_BACKEND
bool "Network-device backend driver"
depends on XEN_PHYSDEV_ACCESS
@@ -92,9 +114,21 @@ config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
are unsure; or if you experience network hangs when this option is
enabled; then you must say N here.
-config XEN_WRITABLE_PAGETABLES
- bool
- default y
+config XEN_BLKDEV_TAP
+ bool "Block device tap driver"
+ default n
+ help
+ This driver allows a VM to interact with other VMs over block device
+ channels. Block messages may be passed through or redirected
+ to a character device, allowing device prototyping in application
+ space. Odds are that you want to say N here.
+
+config XEN_SHADOW_MODE
+ bool "Fake shadow mode"
+ default n
+ help
+ Fakes out a shadow mode kernel.
+
config XEN_SCRUB_PAGES
bool "Scrub memory before freeing it to Xen"
@@ -109,17 +143,17 @@ config XEN_SCRUB_PAGES
choice
prompt "Processor Type"
- default X86
+ default XEN_X86
-config X86
+config XEN_X86
bool "X86"
help
Choose this option if your computer is a X86 architecture.
-config X86_64
+config XEN_X86_64
bool "X86_64"
help
- Choose this option if your computer is a X86 architecture.
+ Choose this option if your computer is an X86_64 architecture.
endchoice
@@ -131,10 +165,14 @@ config HAVE_ARCH_DEV_ALLOC_SKB
source "init/Kconfig"
-if X86
+if XEN_X86
source "arch/xen/i386/Kconfig"
endif
+if XEN_X86_64
+source "arch/xen/x86_64/Kconfig"
+endif
+
menu "Executable file formats"
source "fs/Kconfig.binfmt"
@@ -143,6 +181,12 @@ endmenu
source "arch/xen/Kconfig.drivers"
+if XEN_PRIVILEGED_GUEST
+menu "Power management options"
+source "drivers/acpi/Kconfig"
+endmenu
+endif
+
source "fs/Kconfig"
source "security/Kconfig"
diff --git a/linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig b/linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32
index 70f8cd69ca..b869de31c7 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig
+++ b/linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.11-xen0
-# Tue May 3 13:22:55 2005
+# Wed May 4 17:11:56 2005
#
CONFIG_XEN=y
CONFIG_ARCH_XEN=y
@@ -13,14 +13,17 @@ CONFIG_NO_IDLE_HZ=y
CONFIG_XEN_PRIVILEGED_GUEST=y
CONFIG_XEN_PHYSDEV_ACCESS=y
CONFIG_XEN_BLKDEV_BACKEND=y
+# CONFIG_XEN_BLKDEV_TAP_BE is not set
+CONFIG_XEN_BLKDEV_GRANT=y
CONFIG_XEN_NETDEV_BACKEND=y
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
-CONFIG_XEN_WRITABLE_PAGETABLES=y
+# CONFIG_XEN_BLKDEV_TAP is not set
+# CONFIG_XEN_SHADOW_MODE is not set
CONFIG_XEN_SCRUB_PAGES=y
-CONFIG_X86=y
-# CONFIG_X86_64 is not set
+CONFIG_XEN_X86=y
+# CONFIG_XEN_X86_64 is not set
CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
#
@@ -75,6 +78,7 @@ CONFIG_KMOD=y
# X86 Processor Configuration
#
CONFIG_XENARCH="i386"
+CONFIG_X86=y
CONFIG_MMU=y
CONFIG_UID16=y
CONFIG_GENERIC_ISA_DMA=y
@@ -129,12 +133,23 @@ CONFIG_NOHIGHMEM=y
CONFIG_MTRR=y
CONFIG_HAVE_DEC_LOCK=y
# CONFIG_REGPARM is not set
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
#
# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
#
+CONFIG_X86_UP_APIC=y
+CONFIG_X86_UP_IOAPIC=y
CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+# CONFIG_PCI_BIOS is not set
CONFIG_PCI_DIRECT=y
+# CONFIG_PCIEPORTBUS is not set
+# CONFIG_PCI_MSI is not set
CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_NAMES is not set
CONFIG_ISA=y
@@ -161,17 +176,25 @@ CONFIG_PCMCIA_PROBE=y
# Kernel hacking
#
CONFIG_DEBUG_KERNEL=y
-CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
# CONFIG_FRAME_POINTER is not set
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_KPROBES is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
# CONFIG_4KSTACKS is not set
+CONFIG_X86_FIND_SMP_CONFIG=y
+CONFIG_X86_MPPARSE=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_X86_BIOS_REBOOT=y
@@ -595,7 +618,7 @@ CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
+CONFIG_TUN=y
#
# ARCnet devices
@@ -838,6 +861,7 @@ CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
# CONFIG_MWAVE is not set
# CONFIG_RAW_DRIVER is not set
+# CONFIG_HPET is not set
# CONFIG_HANGCHECK_TIMER is not set
#
@@ -1004,6 +1028,37 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_INFINIBAND is not set
#
+# Power management options
+#
+
+#
+# ACPI (Advanced Configuration and Power Interface) Support
+#
+CONFIG_ACPI=y
+CONFIG_ACPI_BOOT=y
+CONFIG_ACPI_INTERPRETER=y
+CONFIG_ACPI_AC=m
+CONFIG_ACPI_BATTERY=m
+CONFIG_ACPI_BUTTON=m
+CONFIG_ACPI_VIDEO=m
+CONFIG_ACPI_FAN=m
+CONFIG_ACPI_PROCESSOR=m
+CONFIG_ACPI_THERMAL=m
+CONFIG_ACPI_ASUS=m
+CONFIG_ACPI_IBM=m
+CONFIG_ACPI_TOSHIBA=m
+# CONFIG_ACPI_CUSTOM_DSDT is not set
+CONFIG_ACPI_BLACKLIST_YEAR=0
+# CONFIG_ACPI_DEBUG is not set
+CONFIG_ACPI_BUS=y
+CONFIG_ACPI_EC=y
+CONFIG_ACPI_POWER=y
+CONFIG_ACPI_PCI=y
+CONFIG_ACPI_SYSTEM=y
+# CONFIG_X86_PM_TIMER is not set
+# CONFIG_ACPI_CONTAINER is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
@@ -1030,7 +1085,7 @@ CONFIG_REISERFS_FS=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=y
-# CONFIG_AUTOFS4_FS is not set
+CONFIG_AUTOFS4_FS=y
#
# CD-ROM/DVD Filesystems
diff --git a/linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 b/linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64
new file mode 100644
index 0000000000..9adb57f463
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64
@@ -0,0 +1,995 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.11-xen0
+# Thu Apr 14 19:45:44 2005
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+CONFIG_XEN_PRIVILEGED_GUEST=y
+CONFIG_XEN_PHYSDEV_ACCESS=y
+CONFIG_XEN_BLKDEV_BACKEND=y
+# CONFIG_XEN_BLKDEV_TAP_BE is not set
+CONFIG_XEN_BLKDEV_GRANT=y
+CONFIG_XEN_NETDEV_BACKEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+# CONFIG_XEN_BLKDEV_TAP is not set
+# CONFIG_XEN_SHADOW_MODE is not set
+CONFIG_XEN_SCRUB_PAGES=y
+# CONFIG_XEN_X86 is not set
+CONFIG_XEN_X86_64=y
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+# CONFIG_CLEAN_COMPILE is not set
+CONFIG_BROKEN=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_XENARCH="x86_64"
+CONFIG_X86=y
+CONFIG_MMU=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_X86_GOOD_APIC=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_SMP is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_MICROCODE is not set
+# CONFIG_X86_CPUID is not set
+# CONFIG_NUMA is not set
+# CONFIG_MTRR is not set
+CONFIG_PCI=y
+CONFIG_PCI_DIRECT=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+
+#
+# X86_64 processor configuration
+#
+CONFIG_X86_64=y
+CONFIG_64BIT=y
+
+#
+# Processor type and features
+#
+# CONFIG_MPSC is not set
+CONFIG_GENERIC_CPU=y
+CONFIG_X86_L1_CACHE_BYTES=128
+# CONFIG_X86_TSC is not set
+# CONFIG_X86_MSR is not set
+# CONFIG_GART_IOMMU is not set
+CONFIG_DUMMY_IOMMU=y
+# CONFIG_X86_MCE is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# ACPI (Advanced Configuration and Power Interface) Support
+#
+# CONFIG_ACPI is not set
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# Bus options (PCI etc.)
+#
+# CONFIG_PCI_MMCONFIG is not set
+# CONFIG_UNORDERED_IO is not set
+
+#
+# Executable file formats / Emulations
+#
+# CONFIG_IA32_EMULATION is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_CPQ_DA is not set
+CONFIG_BLK_CPQ_CISS_DA=y
+# CONFIG_CISS_SCSI_TAPE is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_LBD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+# CONFIG_BLK_DEV_HD_IDE is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+CONFIG_BLK_DEV_IDECD=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_CMD640 is not set
+CONFIG_BLK_DEV_IDEPCI=y
+# CONFIG_IDEPCI_SHARE_IRQ is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_ATIIXP is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+CONFIG_BLK_DEV_PIIX=y
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+CONFIG_BLK_DEV_SVWKS=y
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SIS5513 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+CONFIG_BLK_DEV_3W_XXXX_RAID=y
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+CONFIG_SCSI_AACRAID=y
+CONFIG_SCSI_AIC7XXX=y
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
+CONFIG_AIC7XXX_DEBUG_ENABLE=y
+CONFIG_AIC7XXX_DEBUG_MASK=0
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+CONFIG_SCSI_AIC79XX=y
+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
+CONFIG_AIC79XX_DEBUG_ENABLE=y
+CONFIG_AIC79XX_DEBUG_MASK=0
+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
+# CONFIG_SCSI_ADVANSYS is not set
+CONFIG_MEGARAID_NEWGEN=y
+# CONFIG_MEGARAID_MM is not set
+CONFIG_SCSI_SATA=y
+# CONFIG_SCSI_SATA_AHCI is not set
+# CONFIG_SCSI_SATA_SVW is not set
+CONFIG_SCSI_ATA_PIIX=y
+# CONFIG_SCSI_SATA_NV is not set
+CONFIG_SCSI_SATA_PROMISE=y
+# CONFIG_SCSI_SATA_QSTOR is not set
+CONFIG_SCSI_SATA_SX4=y
+CONFIG_SCSI_SATA_SIL=y
+# CONFIG_SCSI_SATA_SIS is not set
+# CONFIG_SCSI_SATA_ULI is not set
+# CONFIG_SCSI_SATA_VIA is not set
+# CONFIG_SCSI_SATA_VITESSE is not set
+CONFIG_SCSI_BUSLOGIC=y
+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLA2XXX=y
+# CONFIG_SCSI_QLA21XX is not set
+# CONFIG_SCSI_QLA22XX is not set
+# CONFIG_SCSI_QLA2300 is not set
+# CONFIG_SCSI_QLA2322 is not set
+# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+CONFIG_FUSION=y
+CONFIG_FUSION_MAX_SGE=40
+# CONFIG_FUSION_CTL is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_CT_ACCT=y
+# CONFIG_IP_NF_CONNTRACK_MARK is not set
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+# CONFIG_IP_NF_IRC is not set
+# CONFIG_IP_NF_TFTP is not set
+# CONFIG_IP_NF_AMANDA is not set
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+# CONFIG_IP_NF_MATCH_LIMIT is not set
+CONFIG_IP_NF_MATCH_IPRANGE=m
+# CONFIG_IP_NF_MATCH_MAC is not set
+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+# CONFIG_IP_NF_MATCH_MARK is not set
+# CONFIG_IP_NF_MATCH_MULTIPORT is not set
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_DSCP is not set
+# CONFIG_IP_NF_MATCH_AH_ESP is not set
+# CONFIG_IP_NF_MATCH_LENGTH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_TCPMSS is not set
+# CONFIG_IP_NF_MATCH_HELPER is not set
+# CONFIG_IP_NF_MATCH_STATE is not set
+# CONFIG_IP_NF_MATCH_CONNTRACK is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+# CONFIG_IP_NF_MATCH_PHYSDEV is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_REALM is not set
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+# CONFIG_IP_NF_TARGET_REDIRECT is not set
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_SAME is not set
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_FTP=m
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# Bridge: Netfilter Configuration
+#
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+CONFIG_BRIDGE=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=y
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+CONFIG_NET_VENDOR_3COM=y
+CONFIG_VORTEX=y
+# CONFIG_TYPHOON is not set
+
+#
+# Tulip family network device support
+#
+CONFIG_NET_TULIP=y
+# CONFIG_DE2104X is not set
+CONFIG_TULIP=y
+# CONFIG_TULIP_MWI is not set
+# CONFIG_TULIP_MMIO is not set
+# CONFIG_TULIP_NAPI is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
+# CONFIG_HP100 is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_E100_NAPI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+CONFIG_NE2K_PCI=y
+# CONFIG_8139CP is not set
+CONFIG_8139TOO=y
+CONFIG_8139TOO_PIO=y
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+CONFIG_VIA_RHINE=y
+# CONFIG_VIA_RHINE_MMIO is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+CONFIG_ACENIC=y
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+# CONFIG_DL2K is not set
+CONFIG_E1000=y
+# CONFIG_E1000_NAPI is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+CONFIG_TIGON3=y
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+CONFIG_AGP=m
+CONFIG_AGP_AMD64=m
+CONFIG_AGP_INTEL_MCH=m
+CONFIG_DRM=m
+CONFIG_DRM_TDFX=m
+# CONFIG_DRM_GAMMA is not set
+CONFIG_DRM_R128=m
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_MGA=m
+CONFIG_DRM_SIS=m
+# CONFIG_MWAVE is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+# CONFIG_IBM_ASM is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+# CONFIG_VIDEO_SELECT is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+
+#
+# XFS support
+#
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V4 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=m
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
diff --git a/linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig b/linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32
index 37fce4937e..478d412669 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig
+++ b/linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.11-xenU
-# Wed Apr 13 23:18:37 2005
+# Wed May 4 17:14:10 2005
#
CONFIG_XEN=y
CONFIG_ARCH_XEN=y
@@ -12,13 +12,15 @@ CONFIG_NO_IDLE_HZ=y
#
# CONFIG_XEN_PRIVILEGED_GUEST is not set
# CONFIG_XEN_PHYSDEV_ACCESS is not set
+CONFIG_XEN_BLKDEV_GRANT=y
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
-CONFIG_XEN_WRITABLE_PAGETABLES=y
+# CONFIG_XEN_BLKDEV_TAP is not set
+# CONFIG_XEN_SHADOW_MODE is not set
CONFIG_XEN_SCRUB_PAGES=y
-CONFIG_X86=y
-# CONFIG_X86_64 is not set
+CONFIG_XEN_X86=y
+# CONFIG_XEN_X86_64 is not set
CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
#
@@ -72,6 +74,7 @@ CONFIG_KMOD=y
# X86 Processor Configuration
#
CONFIG_XENARCH="i386"
+CONFIG_X86=y
CONFIG_MMU=y
CONFIG_UID16=y
CONFIG_GENERIC_ISA_DMA=y
@@ -129,16 +132,22 @@ CONFIG_HAVE_DEC_LOCK=y
# Kernel hacking
#
CONFIG_DEBUG_KERNEL=y
-CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
# CONFIG_FRAME_POINTER is not set
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_KPROBES is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
# CONFIG_4KSTACKS is not set
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 b/linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64
new file mode 100644
index 0000000000..e766ae7da1
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64
@@ -0,0 +1,520 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.11-xenU
+# Thu Apr 14 22:19:48 2005
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+# CONFIG_XEN_PRIVILEGED_GUEST is not set
+# CONFIG_XEN_PHYSDEV_ACCESS is not set
+CONFIG_XEN_BLKDEV_GRANT=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+# CONFIG_XEN_BLKDEV_TAP is not set
+# CONFIG_XEN_SHADOW_MODE is not set
+CONFIG_XEN_SCRUB_PAGES=y
+# CONFIG_XEN_X86 is not set
+CONFIG_XEN_X86_64=y
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_XENARCH="x86_64"
+CONFIG_X86=y
+CONFIG_MMU=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_X86_GOOD_APIC=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT=y
+# CONFIG_MICROCODE is not set
+CONFIG_X86_CPUID=y
+# CONFIG_NUMA is not set
+# CONFIG_MTRR is not set
+# CONFIG_PCI is not set
+# CONFIG_EARLY_PRINTK is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+
+#
+# X86_64 processor configuration
+#
+CONFIG_X86_64=y
+CONFIG_64BIT=y
+
+#
+# Processor type and features
+#
+# CONFIG_MPSC is not set
+CONFIG_GENERIC_CPU=y
+CONFIG_X86_L1_CACHE_BYTES=128
+# CONFIG_X86_TSC is not set
+# CONFIG_X86_MSR is not set
+CONFIG_DUMMY_IOMMU=y
+# CONFIG_X86_MCE is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# ACPI (Advanced Configuration and Power Interface) Support
+#
+# CONFIG_ACPI is not set
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# Bus options (PCI etc.)
+#
+# CONFIG_UNORDERED_IO is not set
+
+#
+# Executable file formats / Emulations
+#
+# CONFIG_IA32_EMULATION is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_LBD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=m
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# Character devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+
+#
+# XFS support
+#
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS_XATTR=y
+# CONFIG_DEVPTS_FS_SECURITY is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=m
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC32 is not set
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/Kconfig b/linux-2.6.11-xen-sparse/arch/xen/i386/Kconfig
index fb421d855d..d1ffcb05d1 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/Kconfig
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/Kconfig
@@ -9,6 +9,15 @@ config XENARCH
string
default i386
+config X86
+ bool
+ default y
+ help
+ This is Linux's home port. Linux was originally native to the Intel
+ 386, and runs on all the later x86 processors including the Intel
+ 486, 586, Pentiums, and various instruction-set-compatible chips by
+ AMD, Cyrix, and others.
+
config MMU
bool
default y
@@ -335,36 +344,33 @@ config HPET_EMULATE_RTC
def_bool HPET_TIMER && RTC=y
config SMP
- bool
- default n
-#config SMP
-# bool "Symmetric multi-processing support"
-# ---help---
-# This enables support for systems with more than one CPU. If you have
-# a system with only one CPU, like most personal computers, say N. If
-# you have a system with more than one CPU, say Y.
-#
-# If you say N here, the kernel will run on single and multiprocessor
-# machines, but will use only one CPU of a multiprocessor machine. If
-# you say Y here, the kernel will run on many, but not all,
-# singleprocessor machines. On a singleprocessor machine, the kernel
-# will run faster if you say N here.
-#
-# Note that if you say Y here and choose architecture "586" or
-# "Pentium" under "Processor family", the kernel will not work on 486
-# architectures. Similarly, multiprocessor kernels for the "PPro"
-# architecture may not work on all Pentium based boards.
-#
-# People using multiprocessor machines who say Y here should also say
-# Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
-# Management" code will be disabled if you say Y here.
-#
-# See also the <file:Documentation/smp.txt>,
-# <file:Documentation/i386/IO-APIC.txt>,
-# <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
-# <http://www.tldp.org/docs.html#howto>.
-#
-# If you don't know what to do here, say N.
+ bool "Symmetric multi-processing support"
+ ---help---
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
+
+ If you say N here, the kernel will run on single and multiprocessor
+ machines, but will use only one CPU of a multiprocessor machine. If
+ you say Y here, the kernel will run on many, but not all,
+ singleprocessor machines. On a singleprocessor machine, the kernel
+ will run faster if you say N here.
+
+ Note that if you say Y here and choose architecture "586" or
+ "Pentium" under "Processor family", the kernel will not work on 486
+ architectures. Similarly, multiprocessor kernels for the "PPro"
+ architecture may not work on all Pentium based boards.
+
+ People using multiprocessor machines who say Y here should also say
+ Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
+ Management" code will be disabled if you say Y here.
+
+ See also the <file:Documentation/smp.txt>,
+ <file:Documentation/i386/IO-APIC.txt>,
+ <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+ <http://www.tldp.org/docs.html#howto>.
+
+ If you don't know what to do here, say N.
config NR_CPUS
int "Maximum number of CPUs (2-255)"
@@ -630,7 +636,7 @@ config MTRR
config IRQBALANCE
bool "Enable kernel irq balancing"
- depends on SMP && X86_IO_APIC
+ depends on SMP && X86_IO_APIC && !XEN
default y
help
The default yes will allow the kernel to do irq load balancing.
@@ -661,6 +667,25 @@ config REGPARM
generate incorrect output with certain kernel constructs when
-mregparm=3 is used.
+config X86_LOCAL_APIC
+ bool
+ depends on !SMP && X86_UP_APIC
+ default y
+
+config X86_IO_APIC
+ bool
+ depends on !SMP && X86_UP_IOAPIC
+ default y
+
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+ depends on SMP && HOTPLUG && EXPERIMENTAL
+ ---help---
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
+
+ Say N.
+
if XEN_PHYSDEV_ACCESS
@@ -676,6 +701,36 @@ config X86_LOCAL_APIC
depends on (X86_VISWS || SMP) && !X86_VOYAGER
default y
+config X86_UP_APIC
+ bool "Local APIC support on uniprocessors" if !SMP
+ depends on !(X86_VISWS || X86_VOYAGER)
+ ---help---
+ A local APIC (Advanced Programmable Interrupt Controller) is an
+ integrated interrupt controller in the CPU. If you have a single-CPU
+ system which has a processor with a local APIC, you can say Y here to
+ enable and use it. If you say Y here even though your machine doesn't
+ have a local APIC, then the kernel will still run with no slowdown at
+ all. The local APIC supports CPU-generated self-interrupts (timer,
+ performance counters), and the NMI watchdog which detects hard
+ lockups.
+
+ If you have a system with several CPUs, you do not need to say Y
+ here: the local APIC will be used automatically.
+
+config X86_UP_IOAPIC
+ bool "IO-APIC support on uniprocessors"
+ depends on !SMP && X86_UP_APIC
+ help
+ An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
+ SMP-capable replacement for PC-style interrupt controllers. Most
+ SMP systems and a small number of uniprocessor systems have one.
+ If you have a single-CPU system with an IO-APIC, you can say Y here
+ to use it. If you say Y here even though your machine doesn't have
+ an IO-APIC, then the kernel will still run with no slowdown at all.
+
+ If you have a system with several CPUs, you do not need to say Y
+ here: the IO-APIC will be used automatically.
+
config X86_IO_APIC
bool
depends on SMP && !(X86_VISWS || X86_VOYAGER)
@@ -696,52 +751,53 @@ config PCI
information about which PCI hardware does work under Linux and which
doesn't.
-#choice
-# prompt "PCI access mode"
-# depends on PCI && !X86_VISWS
-# default PCI_GOANY
-# ---help---
-# On PCI systems, the BIOS can be used to detect the PCI devices and
-# determine their configuration. However, some old PCI motherboards
-# have BIOS bugs and may crash if this is done. Also, some embedded
-# PCI-based systems don't have any BIOS at all. Linux can also try to
-# detect the PCI hardware directly without using the BIOS.
-#
-# With this option, you can specify how Linux should detect the
-# PCI devices. If you choose "BIOS", the BIOS will be used,
-# if you choose "Direct", the BIOS won't be used, and if you
-# choose "MMConfig", then PCI Express MMCONFIG will be used.
-# If you choose "Any", the kernel will try MMCONFIG, then the
-# direct access method and falls back to the BIOS if that doesn't
-# work. If unsure, go with the default, which is "Any".
-#
-#config PCI_GOBIOS
-# bool "BIOS"
-#
-#config PCI_GOMMCONFIG
-# bool "MMConfig"
-#
-#config PCI_GODIRECT
-# bool "Direct"
-#
-#config PCI_GOANY
-# bool "Any"
-#
-#endchoice
-#
-#config PCI_BIOS
-# bool
-# depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
-# default y
-#
-#config PCI_DIRECT
-# bool
-# depends on PCI && ((PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
-# default y
+choice
+ prompt "PCI access mode"
+ depends on PCI && !X86_VISWS
+ default PCI_GOANY
+ ---help---
+ On PCI systems, the BIOS can be used to detect the PCI devices and
+ determine their configuration. However, some old PCI motherboards
+ have BIOS bugs and may crash if this is done. Also, some embedded
+ PCI-based systems don't have any BIOS at all. Linux can also try to
+ detect the PCI hardware directly without using the BIOS.
+
+ With this option, you can specify how Linux should detect the
+ PCI devices. If you choose "BIOS", the BIOS will be used,
+ if you choose "Direct", the BIOS won't be used, and if you
+ choose "MMConfig", then PCI Express MMCONFIG will be used.
+	  If you choose "Any", the kernel will try MMCONFIG, then the
+	  direct access method, and finally fall back to the BIOS if that
+	  doesn't work. If unsure, go with the default, which is "Any".
+
+config PCI_GOBIOS
+ bool "BIOS"
+
+config PCI_GOMMCONFIG
+ bool "MMConfig"
+
+config PCI_GODIRECT
+ bool "Direct"
+
+config PCI_GOANY
+ bool "Any"
+
+endchoice
+
+config PCI_BIOS
+ bool
+ depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
+ default y
config PCI_DIRECT
bool
- depends on PCI
+ depends on PCI && ((PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
+ default y
+
+config PCI_MMCONFIG
+ bool
+ depends on PCI && (PCI_GOMMCONFIG || (PCI_GOANY && ACPI))
+ select ACPI_BOOT
default y
source "drivers/pci/pcie/Kconfig"
@@ -811,129 +867,7 @@ endmenu
endif
-menu "Kernel hacking"
-
-config DEBUG_KERNEL
- bool "Kernel debugging"
- help
- Say Y here if you are developing drivers or trying to debug and
- identify kernel problems.
-
-config EARLY_PRINTK
- bool "Early printk" if EMBEDDED
- default y
- help
- Write kernel log output directly into the VGA buffer or to a serial
- port.
-
- This is useful for kernel debugging when your machine crashes very
- early before the console code is initialized. For normal operation
- it is not recommended because it looks ugly and doesn't cooperate
- with klogd/syslogd or the X server. You should normally N here,
- unless you want to debug such a crash.
-
-config DEBUG_STACKOVERFLOW
- bool "Check for stack overflows"
- depends on DEBUG_KERNEL
-
-config DEBUG_STACK_USAGE
- bool "Stack utilization instrumentation"
- depends on DEBUG_KERNEL
- help
- Enables the display of the minimum amount of free stack which each
- task has ever had available in the sysrq-T and sysrq-P debug output.
-
- This option will slow down process creation somewhat.
-
-config DEBUG_SLAB
- bool "Debug memory allocations"
- depends on DEBUG_KERNEL
- help
- Say Y here to have the kernel do limited verification on memory
- allocation as well as poisoning memory on free to catch use of freed
- memory.
-
-config MAGIC_SYSRQ
- bool "Magic SysRq key"
- depends on DEBUG_KERNEL
- help
- If you say Y here, you will have some control over the system even
- if the system crashes for example during kernel debugging (e.g., you
- will be able to flush the buffer cache to disk, reboot the system
- immediately or dump some status information). This is accomplished
- by pressing various keys while holding SysRq (Alt+PrintScreen). It
- also works on a serial console (on PC hardware at least), if you
- send a BREAK and then within 5 seconds a command keypress. The
- keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
- unless you really know what this hack does.
-
-config DEBUG_SPINLOCK
- bool "Spinlock debugging"
- depends on DEBUG_KERNEL
- help
- Say Y here and build SMP to catch missing spinlock initialization
- and certain other kinds of spinlock errors commonly made. This is
- best used in conjunction with the NMI watchdog so that spinlock
- deadlocks are also debuggable.
-
-config DEBUG_PAGEALLOC
- bool "Page alloc debugging"
- depends on DEBUG_KERNEL
- help
- Unmap pages from the kernel linear mapping after free_pages().
- This results in a large slowdown, but helps to find certain types
- of memory corruptions.
-
-config DEBUG_HIGHMEM
- bool "Highmem debugging"
- depends on DEBUG_KERNEL && HIGHMEM
- help
- This options enables addition error checking for high memory systems.
- Disable for production systems.
-
-config DEBUG_INFO
- bool "Compile the kernel with debug info"
- depends on DEBUG_KERNEL
- help
- If you say Y here the resulting kernel image will include
- debugging info resulting in a larger kernel image.
- Say Y here only if you plan to use gdb to debug the kernel.
- If you don't debug the kernel, you can say N.
-
-config DEBUG_SPINLOCK_SLEEP
- bool "Sleep-inside-spinlock checking"
- help
- If you say Y here, various routines which may sleep will become very
- noisy if they are called with a spinlock held.
-
-config FRAME_POINTER
- bool "Compile the kernel with frame pointers"
- help
- If you say Y here the resulting kernel image will be slightly larger
- and slower, but it will give very useful debugging information.
- If you don't debug the kernel, you can say N, but we may not be able
- to solve problems without frame pointers.
-
-config 4KSTACKS
- bool "Use 4Kb for kernel stacks instead of 8Kb"
- help
- If you say Y here the kernel will use a 4Kb stacksize for the
- kernel stack attached to each process/thread. This facilitates
- running more threads on a system and also reduces the pressure
- on the VM subsystem for higher order allocations. This option
- will also use IRQ stacks to compensate for the reduced stackspace.
-
-config X86_FIND_SMP_CONFIG
- bool
- depends on X86_LOCAL_APIC || X86_VOYAGER
- default y
-
-config X86_MPPARSE
- bool
- depends on X86_LOCAL_APIC && !X86_VISWS
- default y
-
-endmenu
+source "arch/i386/Kconfig.debug"
#
# Use the generic interrupt handling code in kernel/irq/:
@@ -951,10 +885,10 @@ config X86_SMP
depends on SMP && !X86_VOYAGER
default y
-config X86_HT
- bool
- depends on SMP && !(X86_VISWS || X86_VOYAGER)
- default y
+#config X86_HT
+# bool
+# depends on SMP && !(X86_VISWS || X86_VOYAGER)
+# default y
config X86_BIOS_REBOOT
bool
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/Makefile b/linux-2.6.11-xen-sparse/arch/xen/i386/Makefile
index 8af1059853..053c0984ac 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/Makefile
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/Makefile
@@ -72,6 +72,7 @@ head-y := arch/xen/i386/kernel/head.o arch/xen/i386/kernel/init_task.o
libs-y += arch/i386/lib/
core-y += arch/xen/i386/kernel/ \
arch/xen/i386/mm/ \
+ arch/xen/i386/mach-default/ \
arch/i386/crypto/
# \
# arch/xen/$(mcore-y)/
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/Makefile b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/Makefile
index 522bab987a..48cbb6ddd0 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/Makefile
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/Makefile
@@ -10,9 +10,9 @@ extra-y := head.o init_task.o
obj-y := process.o signal.o entry.o traps.o \
time.o ioport.o ldt.o setup.o \
- pci-dma.o i386_ksyms.o
+ pci-dma.o i386_ksyms.o irq.o
-c-obj-y := semaphore.o irq.o vm86.o \
+c-obj-y := semaphore.o vm86.o \
ptrace.o sys_i386.o \
i387.o dmi_scan.o bootflag.o \
doublefault.o quirks.o
@@ -20,18 +20,19 @@ s-obj-y :=
obj-y += cpu/
obj-y += timers/
-c-obj-$(CONFIG_ACPI_BOOT) += acpi/
+obj-$(CONFIG_ACPI_BOOT) += acpi/
#c-obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
c-obj-$(CONFIG_MCA) += mca.o
c-obj-$(CONFIG_X86_MSR) += msr.o
c-obj-$(CONFIG_X86_CPUID) += cpuid.o
obj-$(CONFIG_MICROCODE) += microcode.o
c-obj-$(CONFIG_APM) += apm.o
-c-obj-$(CONFIG_X86_SMP) += smp.o smpboot.o
-c-obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
-c-obj-$(CONFIG_X86_MPPARSE) += mpparse.o
-c-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
-c-obj-$(CONFIG_X86_IO_APIC) += io_apic.o
+obj-$(CONFIG_X86_SMP) += smp.o smpboot.o
+#obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
+obj-$(CONFIG_X86_MPPARSE) += mpparse.o
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o
+c-obj-$(CONFIG_X86_LOCAL_APIC) += nmi.o
+obj-$(CONFIG_X86_IO_APIC) += io_apic.o
c-obj-$(CONFIG_X86_NUMAQ) += numaq.o
c-obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o
c-obj-$(CONFIG_MODULES) += module.o
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/acpi/Makefile b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/acpi/Makefile
new file mode 100644
index 0000000000..9254f472f2
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/acpi/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_ACPI_BOOT) := boot.o
+c-obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
+c-obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
+
+c-link :=
+
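+# The c-obj sources are not modified for Xen; symlink them in from
+# arch/i386/kernel/acpi/ and build them in place.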
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/i386/kernel/acpi/$(notdir $@) $@
+
+obj-y += $(c-obj-y) $(s-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/acpi/boot.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/acpi/boot.c
new file mode 100644
index 0000000000..86ad650024
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/acpi/boot.c
@@ -0,0 +1,906 @@
+/*
+ * boot.c - Architecture-Specific Low-Level ACPI Boot Support
+ *
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/init.h>
+#include <linux/config.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <asm/pgtable.h>
+#include <asm/io_apic.h>
+#include <asm/apic.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_XEN
+#include <asm/fixmap.h>
+#endif
+
+void (*pm_power_off)(void) = NULL;
+
+#ifdef CONFIG_X86_64
+
+static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) { }
+extern void __init clustered_apic_check(void);
+static inline int ioapic_setup_disabled(void) { return 0; }
+#include <asm/proto.h>
+
+#else /* X86 */
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <mach_apic.h>
+#include <mach_mpparse.h>
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#endif /* X86 */
+
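+/*
+ * A MADT entry is considered bad if it is NULL, extends past the end of
+ * the table, or its length field does not match its structure size.
+ */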
+#define BAD_MADT_ENTRY(entry, end) ( \
+ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
+ ((acpi_table_entry_header *)entry)->length != sizeof(*entry))
+
+#define PREFIX "ACPI: "
+
+#ifdef CONFIG_ACPI_PCI
+int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
+int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
+#else
+int acpi_noirq __initdata = 1;
+int acpi_pci_disabled __initdata = 1;
+#endif
+int acpi_ht __initdata = 1; /* enable HT */
+
+int acpi_lapic;
+int acpi_ioapic;
+int acpi_strict;
+EXPORT_SYMBOL(acpi_strict);
+
+acpi_interrupt_flags acpi_sci_flags __initdata;
+int acpi_sci_override_gsi __initdata;
+int acpi_skip_timer_override __initdata;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
+#endif
+
+#ifndef __HAVE_ARCH_CMPXCHG
+#warning ACPI uses CMPXCHG, i486 and later hardware
+#endif
+
+#define MAX_MADT_ENTRIES 256
+u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
+ { [0 ... MAX_MADT_ENTRIES-1] = 0xff };
+EXPORT_SYMBOL(x86_acpiid_to_apicid);
+
+/* --------------------------------------------------------------------------
+ Boot-time Configuration
+ -------------------------------------------------------------------------- */
+
+/*
+ * The default interrupt routing model is PIC (8259). This gets
+ * overridden if IOAPICs are enumerated (below).
+ */
+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
+
+#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
+
+/* rely on all ACPI tables being in the direct mapping */
+char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
+{
+ if (!phys_addr || !size)
+ return NULL;
+
+ if (phys_addr < (end_pfn_map << PAGE_SHIFT))
+ return __va(phys_addr);
+
+ return NULL;
+}
+
+#else
+
+/*
+ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
+ * to map the target physical address. The problem is that set_fixmap()
+ * provides a single page, and it is possible that the page is not
+ * sufficient.
+ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
+ * i.e. until the next __acpi_map_table() call.
+ *
+ * Important Safety Note: the fixed ACPI page indices are *subtracted*
+ * from the fixed base address. That's why we start at FIX_ACPI_END and
+ * count idx down while incrementing the phys address.
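+ *
+ * For example, a 2000-byte table starting 500 bytes before a page
+ * boundary spans two physical pages: the first is mapped at index
+ * FIX_ACPI_END, the second at FIX_ACPI_END-1, whose virtual address is
+ * one page *higher*, keeping the mapping virtually contiguous.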
+ */
+char *__acpi_map_table(unsigned long phys, unsigned long size)
+{
+ unsigned long base, offset, mapped_size;
+ int idx;
+
+#ifndef CONFIG_XEN
+ if (phys + size < 8*1024*1024)
+ return __va(phys);
+#endif
+
+ offset = phys & (PAGE_SIZE - 1);
+ mapped_size = PAGE_SIZE - offset;
+ set_fixmap(FIX_ACPI_END, phys);
+ base = fix_to_virt(FIX_ACPI_END);
+
+ /*
+	 * Most tables fit within the single page mapped above; map
+	 * additional pages only when the table crosses a page boundary.
+ */
+ idx = FIX_ACPI_END;
+ while (mapped_size < size) {
+ if (--idx < FIX_ACPI_BEGIN)
+ return NULL; /* cannot handle this */
+ phys += PAGE_SIZE;
+ set_fixmap(idx, phys);
+ mapped_size += PAGE_SIZE;
+ }
+
+ return ((unsigned char *) base + offset);
+}
+#endif
+
+#ifdef CONFIG_PCI_MMCONFIG
+static int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
+{
+ struct acpi_table_mcfg *mcfg;
+
+ if (!phys_addr || !size)
+ return -EINVAL;
+
+ mcfg = (struct acpi_table_mcfg *) __acpi_map_table(phys_addr, size);
+ if (!mcfg) {
+ printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
+ return -ENODEV;
+ }
+
+ if (mcfg->base_reserved) {
+ printk(KERN_ERR PREFIX "MMCONFIG not in low 4GB of memory\n");
+ return -ENODEV;
+ }
+
+ pci_mmcfg_base_addr = mcfg->base_address;
+
+ return 0;
+}
+#else
+#define acpi_parse_mcfg NULL
+#endif /* !CONFIG_PCI_MMCONFIG */
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static int __init
+acpi_parse_madt (
+ unsigned long phys_addr,
+ unsigned long size)
+{
+ struct acpi_table_madt *madt = NULL;
+
+ if (!phys_addr || !size)
+ return -EINVAL;
+
+ madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
+ if (!madt) {
+ printk(KERN_WARNING PREFIX "Unable to map MADT\n");
+ return -ENODEV;
+ }
+
+ if (madt->lapic_address) {
+ acpi_lapic_addr = (u64) madt->lapic_address;
+
+ printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
+ madt->lapic_address);
+ }
+
+ acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
+
+ return 0;
+}
+
+
+static int __init
+acpi_parse_lapic (
+ acpi_table_entry_header *header, const unsigned long end)
+{
+ struct acpi_table_lapic *processor = NULL;
+
+ processor = (struct acpi_table_lapic*) header;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+
+ /* no utility in registering a disabled processor */
+ if (processor->flags.enabled == 0)
+ return 0;
+
+ x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
+
+ mp_register_lapic (
+ processor->id, /* APIC ID */
+ processor->flags.enabled); /* Enabled? */
+
+ return 0;
+}
+
+static int __init
+acpi_parse_lapic_addr_ovr (
+ acpi_table_entry_header *header, const unsigned long end)
+{
+ struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
+
+ lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
+
+ if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
+ return -EINVAL;
+
+ acpi_lapic_addr = lapic_addr_ovr->address;
+
+ return 0;
+}
+
+static int __init
+acpi_parse_lapic_nmi (
+ acpi_table_entry_header *header, const unsigned long end)
+{
+ struct acpi_table_lapic_nmi *lapic_nmi = NULL;
+
+ lapic_nmi = (struct acpi_table_lapic_nmi*) header;
+
+ if (BAD_MADT_ENTRY(lapic_nmi, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+
+ if (lapic_nmi->lint != 1)
+ printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+
+ return 0;
+}
+
+
+#endif /*CONFIG_X86_LOCAL_APIC*/
+
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
+
+static int __init
+acpi_parse_ioapic (
+ acpi_table_entry_header *header, const unsigned long end)
+{
+ struct acpi_table_ioapic *ioapic = NULL;
+
+ ioapic = (struct acpi_table_ioapic*) header;
+
+ if (BAD_MADT_ENTRY(ioapic, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+
+ mp_register_ioapic (
+ ioapic->id,
+ ioapic->address,
+ ioapic->global_irq_base);
+
+ return 0;
+}
+
+/*
+ * Parse Interrupt Source Override for the ACPI SCI
+ */
+static void
+acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
+{
+ if (trigger == 0) /* compatible SCI trigger is level */
+ trigger = 3;
+
+ if (polarity == 0) /* compatible SCI polarity is low */
+ polarity = 3;
+
+ /* Command-line over-ride via acpi_sci= */
+ if (acpi_sci_flags.trigger)
+ trigger = acpi_sci_flags.trigger;
+
+ if (acpi_sci_flags.polarity)
+ polarity = acpi_sci_flags.polarity;
+
+ /*
+	 * mp_config_acpi_legacy_irqs() already set up IRQs < 16.
+ * If GSI is < 16, this will update its flags,
+ * else it will create a new mp_irqs[] entry.
+ */
+ mp_override_legacy_irq(gsi, polarity, trigger, gsi);
+
+ /*
+ * stash over-ride to indicate we've been here
+ * and for later update of acpi_fadt
+ */
+ acpi_sci_override_gsi = gsi;
+ return;
+}
+
+static int __init
+acpi_parse_int_src_ovr (
+ acpi_table_entry_header *header, const unsigned long end)
+{
+ struct acpi_table_int_src_ovr *intsrc = NULL;
+
+ intsrc = (struct acpi_table_int_src_ovr*) header;
+
+ if (BAD_MADT_ENTRY(intsrc, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+
+ if (intsrc->bus_irq == acpi_fadt.sci_int) {
+ acpi_sci_ioapic_setup(intsrc->global_irq,
+ intsrc->flags.polarity, intsrc->flags.trigger);
+ return 0;
+ }
+
+ if (acpi_skip_timer_override &&
+ intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+ return 0;
+ }
+
+ mp_override_legacy_irq (
+ intsrc->bus_irq,
+ intsrc->flags.polarity,
+ intsrc->flags.trigger,
+ intsrc->global_irq);
+
+ return 0;
+}
+
+
+static int __init
+acpi_parse_nmi_src (
+ acpi_table_entry_header *header, const unsigned long end)
+{
+ struct acpi_table_nmi_src *nmi_src = NULL;
+
+ nmi_src = (struct acpi_table_nmi_src*) header;
+
+ if (BAD_MADT_ENTRY(nmi_src, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+
+	/* TBD: Support nmi_src entries? */
+
+ return 0;
+}
+
+#endif /* CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER */
+
+#ifdef CONFIG_ACPI_BUS
+
+/*
+ * acpi_pic_sci_set_trigger()
+ *
+ * use ELCR to set PIC-mode trigger type for SCI
+ *
+ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
+ * it may require Edge Trigger -- use "acpi_sci=edge"
+ *
+ * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
+ * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
+ * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
+ * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
+ */
+
+void __init
+acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
+{
+ unsigned int mask = 1 << irq;
+ unsigned int old, new;
+
+ /* Real old ELCR mask */
+ old = inb(0x4d0) | (inb(0x4d1) << 8);
+
+ /*
+	 * If we use ACPI to set PCI IRQs, then we should clear ELCR
+	 * since we will set it correctly as we enable the PCI IRQ
+ * routing.
+ */
+ new = acpi_noirq ? old : 0;
+
+ /*
+	 * Update SCI information in the ELCR; it isn't in the PCI
+	 * routing tables.
+ */
+ switch (trigger) {
+ case 1: /* Edge - clear */
+ new &= ~mask;
+ break;
+ case 3: /* Level - set */
+ new |= mask;
+ break;
+ }
+
+ if (old == new)
+ return;
+
+ printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
+ outb(new, 0x4d0);
+ outb(new >> 8, 0x4d1);
+}
+
+
+#endif /* CONFIG_ACPI_BUS */
+
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
+{
+#ifdef CONFIG_X86_IO_APIC
+ if (use_pci_vector() && !platform_legacy_irq(gsi))
+ *irq = IO_APIC_VECTOR(gsi);
+ else
+#endif
+ *irq = gsi;
+ return 0;
+}
+
+unsigned int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+{
+ unsigned int irq;
+ unsigned int plat_gsi = gsi;
+
+#ifdef CONFIG_X86_IO_APIC
+ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
+ plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
+ }
+#endif
+ acpi_gsi_to_irq(plat_gsi, &irq);
+ return irq;
+}
+EXPORT_SYMBOL(acpi_register_gsi);
+
+/*
+ * ACPI based hotplug support for CPU
+ */
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+int
+acpi_map_lsapic(acpi_handle handle, int *pcpu)
+{
+ /* TBD */
+ return -EINVAL;
+}
+EXPORT_SYMBOL(acpi_map_lsapic);
+
+
+int
+acpi_unmap_lsapic(int cpu)
+{
+ /* TBD */
+ return -EINVAL;
+}
+EXPORT_SYMBOL(acpi_unmap_lsapic);
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
+static unsigned long __init
+acpi_scan_rsdp (
+ unsigned long start,
+ unsigned long length)
+{
+ unsigned long offset = 0;
+ unsigned long sig_len = sizeof("RSD PTR ") - 1;
+ unsigned long vstart = (unsigned long)isa_bus_to_virt(start);
+
+ /*
+ * Scan all 16-byte boundaries of the physical memory region for the
+ * RSDP signature.
+ */
+ for (offset = 0; offset < length; offset += 16) {
+ if (strncmp((char *) (vstart + offset), "RSD PTR ", sig_len))
+ continue;
+ return (start + offset);
+ }
+
+ return 0;
+}
+
+static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
+{
+ struct acpi_table_sbf *sb;
+
+ if (!phys_addr || !size)
+ return -EINVAL;
+
+ sb = (struct acpi_table_sbf *) __acpi_map_table(phys_addr, size);
+ if (!sb) {
+ printk(KERN_WARNING PREFIX "Unable to map SBF\n");
+ return -ENODEV;
+ }
+
+ sbf_port = sb->sbf_cmos; /* Save CMOS port */
+
+ return 0;
+}
+
+
+#ifdef CONFIG_HPET_TIMER
+
+static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
+{
+ struct acpi_table_hpet *hpet_tbl;
+
+ if (!phys || !size)
+ return -EINVAL;
+
+ hpet_tbl = (struct acpi_table_hpet *) __acpi_map_table(phys, size);
+ if (!hpet_tbl) {
+ printk(KERN_WARNING PREFIX "Unable to map HPET\n");
+ return -ENODEV;
+ }
+
+ if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
+ printk(KERN_WARNING PREFIX "HPET timers must be located in "
+ "memory.\n");
+ return -1;
+ }
+
+#ifdef CONFIG_X86_64
+ vxtime.hpet_address = hpet_tbl->addr.addrl |
+ ((long) hpet_tbl->addr.addrh << 32);
+
+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
+ hpet_tbl->id, vxtime.hpet_address);
+#else /* X86 */
+ {
+ extern unsigned long hpet_address;
+
+ hpet_address = hpet_tbl->addr.addrl;
+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
+ hpet_tbl->id, hpet_address);
+ }
+#endif /* X86 */
+
+ return 0;
+}
+#else
+#define acpi_parse_hpet NULL
+#endif
+
+#ifdef CONFIG_X86_PM_TIMER
+extern u32 pmtmr_ioport;
+#endif
+
+static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
+{
+ struct fadt_descriptor_rev2 *fadt = NULL;
+
+ fadt = (struct fadt_descriptor_rev2*) __acpi_map_table(phys,size);
+ if(!fadt) {
+ printk(KERN_WARNING PREFIX "Unable to map FADT\n");
+ return 0;
+ }
+
+#ifdef CONFIG_ACPI_INTERPRETER
+ /* initialize sci_int early for INT_SRC_OVR MADT parsing */
+ acpi_fadt.sci_int = fadt->sci_int;
+#endif
+
+#ifdef CONFIG_X86_PM_TIMER
+ /* detect the location of the ACPI PM Timer */
+ if (fadt->revision >= FADT2_REVISION_ID) {
+ /* FADT rev. 2 */
+ if (fadt->xpm_tmr_blk.address_space_id != ACPI_ADR_SPACE_SYSTEM_IO)
+ return 0;
+
+ pmtmr_ioport = fadt->xpm_tmr_blk.address;
+ } else {
+ /* FADT rev. 1 */
+ pmtmr_ioport = fadt->V1_pm_tmr_blk;
+ }
+ if (pmtmr_ioport)
+ printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", pmtmr_ioport);
+#endif
+ return 0;
+}
+
+
+unsigned long __init
+acpi_find_rsdp (void)
+{
+ unsigned long rsdp_phys = 0;
+
+ if (efi_enabled) {
+ if (efi.acpi20)
+ return __pa(efi.acpi20);
+ else if (efi.acpi)
+ return __pa(efi.acpi);
+ }
+ /*
+ * Scan memory looking for the RSDP signature. First search EBDA (low
+ * memory) paragraphs and then search upper memory (E0000-FFFFF).
+ */
+ rsdp_phys = acpi_scan_rsdp (0, 0x400);
+ if (!rsdp_phys)
+ rsdp_phys = acpi_scan_rsdp (0xE0000, 0x20000);
+
+ set_fixmap(FIX_ACPI_RSDP_PAGE, rsdp_phys);
+
+ return rsdp_phys;
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Parse LAPIC entries in MADT
+ * returns 0 on success, < 0 on error
+ */
+static int __init
+acpi_parse_madt_lapic_entries(void)
+{
+ int count;
+
+ /*
+ * Note that the LAPIC address is obtained from the MADT (32-bit value)
+	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
+ */
+
+ count = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0);
+ if (count < 0) {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
+ return count;
+ }
+
+ mp_register_lapic_address(acpi_lapic_addr);
+
+ count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
+ MAX_APICS);
+ if (!count) {
+ printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return -ENODEV;
+ }
+ else if (count < 0) {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count;
+ }
+
+ count = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
+ if (count < 0) {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count;
+ }
+ return 0;
+}
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
+/*
+ * Parse IOAPIC related entries in MADT
+ * returns 0 on success, < 0 on error
+ */
+static int __init
+acpi_parse_madt_ioapic_entries(void)
+{
+ int count;
+
+ /*
+ * ACPI interpreter is required to complete interrupt setup,
+ * so if it is off, don't enumerate the io-apics with ACPI.
+	 * If MPS is present, it will handle them;
+	 * otherwise the system will stay in PIC mode.
+ */
+ if (acpi_disabled || acpi_noirq) {
+ return -ENODEV;
+ }
+
+ /*
+ * if "noapic" boot option, don't look for IO-APICs
+ */
+ if (skip_ioapic_setup) {
+ printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
+ "due to 'noapic' option.\n");
+ return -ENODEV;
+ }
+
+ count = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic, MAX_IO_APICS);
+ if (!count) {
+ printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+ return -ENODEV;
+ }
+ else if (count < 0) {
+ printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
+ return count;
+ }
+
+ count = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, NR_IRQ_VECTORS);
+ if (count < 0) {
+ printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count;
+ }
+
+ /*
+ * If BIOS did not supply an INT_SRC_OVR for the SCI
+ * pretend we got one so we can set the SCI flags.
+ */
+ if (!acpi_sci_override_gsi)
+ acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
+
+	/* Fill in identity legacy mappings where there is no override */
+ mp_config_acpi_legacy_irqs();
+
+ count = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, NR_IRQ_VECTORS);
+ if (count < 0) {
+ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count;
+ }
+
+ return 0;
+}
+#else
+static inline int acpi_parse_madt_ioapic_entries(void)
+{
+ return -1;
+}
+#endif /* !(CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER) */
+
+
+static void __init
+acpi_process_madt(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+ int count, error;
+
+ count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
+ if (count >= 1) {
+
+ /*
+ * Parse MADT LAPIC entries
+ */
+ error = acpi_parse_madt_lapic_entries();
+ if (!error) {
+ acpi_lapic = 1;
+
+ /*
+ * Parse MADT IO-APIC entries
+ */
+ error = acpi_parse_madt_ioapic_entries();
+ if (!error) {
+ acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
+ acpi_irq_balance_set(NULL);
+ acpi_ioapic = 1;
+
+ smp_found_config = 1;
+ clustered_apic_check();
+ }
+ }
+ if (error == -EINVAL) {
+ /*
+ * Dell Precision Workstation 410, 610 come here.
+ */
+ printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n");
+ disable_acpi();
+ }
+ }
+#endif
+ return;
+}
+
+/*
+ * acpi_boot_table_init() and acpi_boot_init()
+ * called from setup_arch(), always.
+ * 1. checksums all tables
+ * 2. enumerates lapics
+ * 3. enumerates io-apics
+ *
+ * acpi_table_init() is separate to allow reading SRAT without
+ * other side effects.
+ *
+ * side effects of acpi_boot_init:
+ * acpi_lapic = 1 if LAPIC found
+ * acpi_ioapic = 1 if IOAPIC found
+ * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
+ * if acpi_blacklisted() acpi_disabled = 1;
+ * acpi_irq_model=...
+ * ...
+ *
+ * return value: (currently ignored)
+ * 0: success
+ * !0: failure
+ */
+
+int __init
+acpi_boot_table_init(void)
+{
+ int error;
+
+ /*
+ * If acpi_disabled, bail out
+ * One exception: acpi=ht continues far enough to enumerate LAPICs
+ */
+ if (acpi_disabled && !acpi_ht)
+ return 1;
+
+ /*
+ * Initialize the ACPI boot-time table parser.
+ */
+ error = acpi_table_init();
+ if (error) {
+ disable_acpi();
+ return error;
+ }
+
+#ifdef __i386__
+ check_acpi_pci();
+#endif
+
+ acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
+
+ /*
+ * blacklist may disable ACPI entirely
+ */
+ error = acpi_blacklisted();
+ if (error) {
+ extern int acpi_force;
+
+ if (acpi_force) {
+ printk(KERN_WARNING PREFIX "acpi=force override\n");
+ } else {
+ printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
+ disable_acpi();
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+
+int __init acpi_boot_init(void)
+{
+ /*
+ * If acpi_disabled, bail out
+ * One exception: acpi=ht continues far enough to enumerate LAPICs
+ */
+ if (acpi_disabled && !acpi_ht)
+ return 1;
+
+ acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
+
+ /*
+ * set sci_int and PM timer address
+ */
+ acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
+
+ /*
+ * Process the Multiple APIC Description Table (MADT), if present
+ */
+ acpi_process_madt();
+
+ acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
+ acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
+
+ return 0;
+}
+
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/apic.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/apic.c
new file mode 100644
index 0000000000..5f071a5956
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/apic.c
@@ -0,0 +1,83 @@
+/*
+ * Local APIC handling, local APIC timers
+ *
+ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *
+ * Fixes
+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
+ * thanks to Eric Gilmore
+ * and Rolf G. Tews
+ * for testing these extensively.
+ * Maciej W. Rozycki : Various updates and fixes.
+ * Mikael Pettersson : Power Management for UP-APIC.
+ * Pavel Machek and
+ * Mikael Pettersson : PM converted to driver model.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/mc146818rtc.h>
+#include <linux/kernel_stat.h>
+#include <linux/sysdev.h>
+
+#include <asm/atomic.h>
+#include <asm/smp.h>
+#include <asm/mtrr.h>
+#include <asm/mpspec.h>
+#include <asm/desc.h>
+#include <asm/arch_hooks.h>
+#include <asm/hpet.h>
+
+#include <mach_apic.h>
+
+#include "io_ports.h"
+
+/*
+ * Debug level
+ */
+int apic_verbosity;
+
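+/* Stub: always report the maximum (8-bit) physical broadcast APIC ID. */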
+int get_physical_broadcast(void)
+{
+ return 0xff;
+}
+
+/*
+ * 'What should we do if we get a hw irq event on an illegal vector?'
+ * Each architecture has to answer this itself.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ printk("unexpected IRQ trap at vector %02x\n", irq);
+ /*
+ * Currently unexpected vectors happen only on SMP and APIC.
+ * We _must_ ack these because every local APIC has only N
+ * irq slots per priority level, and a 'hanging, unacked' IRQ
+ * holds up an irq slot - in excessive cases (when multiple
+ * unexpected vectors occur) that might lock up the APIC
+ * completely.
+ */
+ ack_APIC_irq();
+}
+
+/*
+ * This initializes the IO-APIC and APIC hardware if this is
+ * a UP kernel.
+ */
+int __init APIC_init_uniprocessor (void)
+{
+#ifdef CONFIG_X86_IO_APIC
+ if (smp_found_config)
+ if (!skip_ioapic_setup && nr_ioapics)
+ setup_IO_APIC();
+#endif
+
+ return 0;
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c
index e4092fbc67..c608ef099a 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c
@@ -564,7 +564,6 @@ void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
make_page_readonly((void *)va);
}
- flush_page_update_queue();
if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
BUG();
lgdt_finish();
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S
index af2fad5236..064be004e7 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S
@@ -80,23 +80,42 @@ VM_MASK = 0x00020000
#define evtchn_upcall_pending /* 0 */
#define evtchn_upcall_mask 1
-#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
-#define XEN_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-#define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
- movb evtchn_upcall_mask(reg), tmp; \
- movb tmp, off(%esp)
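+/* log2 of the per-VCPU info size within shared_info: each slot is 8 bytes. */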
+#define sizeof_vcpu_shift 3
+
+#ifdef CONFIG_SMP
+#define preempt_disable(reg) incl TI_preempt_count(reg)
+#define preempt_enable(reg) decl TI_preempt_count(reg)
+#define XEN_GET_VCPU_INFO(reg) preempt_disable(%ebp) ; \
+ movl TI_cpu(%ebp),reg ; \
+ shl $sizeof_vcpu_shift,reg ; \
+ addl HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%ebp)
+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
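+/* The 0xff bytes mark a vcpu_info critical region in the fixup table;
+   critical_region_fixup aborts that region rather than replaying it. */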
+#else
+#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
+#endif
+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
+ XEN_PUT_VCPU_INFO(reg)
+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
+ XEN_PUT_VCPU_INFO(reg)
#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
#ifdef CONFIG_PREEMPT
-#define preempt_stop XEN_BLOCK_EVENTS(%esi)
+#define preempt_stop GET_THREAD_INFO(%ebp) ; \
+ XEN_BLOCK_EVENTS(%esi)
#else
#define preempt_stop
#define resume_kernel restore_all
#endif
-#define SAVE_ALL_NO_EVENTMASK \
+#define SAVE_ALL \
cld; \
pushl %es; \
pushl %ds; \
@@ -111,11 +130,6 @@ VM_MASK = 0x00020000
movl %edx, %ds; \
movl %edx, %es;
-#define SAVE_ALL \
- SAVE_ALL_NO_EVENTMASK; \
- XEN_GET_VCPU_INFO(%esi); \
- XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK)
-
#define RESTORE_INT_REGS \
popl %ebx; \
popl %ecx; \
@@ -164,7 +178,6 @@ ENTRY(ret_from_fork)
call schedule_tail
GET_THREAD_INFO(%ebp)
popl %eax
- XEN_GET_VCPU_INFO(%esi)
jmp syscall_exit
/*
@@ -185,7 +198,6 @@ ret_from_intr:
testl $(VM_MASK | 2), %eax
jz resume_kernel # returning to kernel or vm86-space
ENTRY(resume_userspace)
- XEN_GET_VCPU_INFO(%esi)
XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
@@ -197,7 +209,6 @@ ENTRY(resume_userspace)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
- XEN_GET_VCPU_INFO(%esi)
XEN_BLOCK_EVENTS(%esi)
cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
jnz restore_all
@@ -284,9 +295,11 @@ restore_all:
jnz resume_vm86
movb EVENT_MASK(%esp), %al
notb %al # %al == ~saved_mask
+ XEN_GET_VCPU_INFO(%esi)
andb evtchn_upcall_mask(%esi),%al
andb $1,%al # %al == mask & ~saved_mask
jnz restore_all_enable_events # != 0 => reenable event delivery
+ XEN_PUT_VCPU_INFO(%esi)
RESTORE_ALL
resume_vm86:
@@ -436,8 +449,6 @@ error_code:
movl %ecx, %ds
movl %ecx, %es
movl %esp,%eax # pt_regs pointer
- XEN_GET_VCPU_INFO(%esi)
- XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
call *%edi
jmp ret_from_exception
@@ -454,27 +465,27 @@ error_code:
# activation and restart the handler using the previous one.
ENTRY(hypervisor_callback)
pushl %eax
- SAVE_ALL_NO_EVENTMASK
+ SAVE_ALL
movl EIP(%esp),%eax
cmpl $scrit,%eax
jb 11f
cmpl $ecrit,%eax
jb critical_region_fixup
-11: XEN_GET_VCPU_INFO(%esi)
- movb $0, EVENT_MASK(%esp)
- push %esp
+11: push %esp
call evtchn_do_upcall
add $4,%esp
jmp ret_from_intr
ALIGN
restore_all_enable_events:
- XEN_UNBLOCK_EVENTS(%esi)
+ XEN_LOCKED_UNBLOCK_EVENTS(%esi)
scrit: /**** START OF CRITICAL REGION ****/
XEN_TEST_PENDING(%esi)
jnz 14f # process more events if necessary...
+ XEN_PUT_VCPU_INFO(%esi)
RESTORE_ALL
-14: XEN_BLOCK_EVENTS(%esi)
+14: XEN_LOCKED_BLOCK_EVENTS(%esi)
+ XEN_PUT_VCPU_INFO(%esi)
jmp 11b
ecrit: /**** END OF CRITICAL REGION ****/
# [How we do the fixup]. We want to merge the current stack frame with the
@@ -487,24 +498,30 @@ ecrit: /**** END OF CRITICAL REGION ****/
critical_region_fixup:
addl $critical_fixup_table-scrit,%eax
movzbl (%eax),%eax # %eax contains num bytes popped
- mov %esp,%esi
+ cmpb $0xff,%al # 0xff => vcpu_info critical region
+ jne 15f
+ GET_THREAD_INFO(%ebp)
+ XEN_PUT_VCPU_INFO(%esi) # abort vcpu_info critical region
+ xorl %eax,%eax
+15: mov %esp,%esi
add %eax,%esi # %esi points at end of src region
mov %esp,%edi
add $0x34,%edi # %edi points at end of dst region
mov %eax,%ecx
	shr $2,%ecx			# convert bytes to words
- je 16f # skip loop if nothing to copy
-15: subl $4,%esi # pre-decrementing copy loop
+ je 17f # skip loop if nothing to copy
+16: subl $4,%esi # pre-decrementing copy loop
subl $4,%edi
movl (%esi),%eax
movl %eax,(%edi)
- loop 15b
-16: movl %edi,%esp # final %edi is top of merged stack
+ loop 16b
+17: movl %edi,%esp # final %edi is top of merged stack
jmp 11b
critical_fixup_table:
- .byte 0x00,0x00,0x00 # testb $0xff,(%esi) = XEN_TEST_PENDING
- .byte 0x00,0x00 # jnz 14f
+ .byte 0xff,0xff,0xff # testb $0xff,(%esi) = XEN_TEST_PENDING
+ .byte 0xff,0xff # jnz 14f
+ XEN_PUT_VCPU_INFO_fixup
.byte 0x00 # pop %ebx
.byte 0x04 # pop %ecx
.byte 0x08 # pop %edx
@@ -516,7 +533,8 @@ critical_fixup_table:
.byte 0x20 # pop %es
.byte 0x24,0x24,0x24 # add $4,%esp
.byte 0x28 # iret
- .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
+ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
+ XEN_PUT_VCPU_INFO_fixup
.byte 0x00,0x00 # jmp 11b
# Hypervisor uses this for application faults while it executes.
@@ -720,8 +738,6 @@ ENTRY(page_fault)
movl %eax, %ds
movl %eax, %es
movl %esp,%eax /* pt_regs pointer */
- XEN_GET_VCPU_INFO(%esi)
- XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
call do_page_fault
jmp ret_from_exception
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S
index 0564db4c26..91036572bc 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S
@@ -3,10 +3,9 @@
.section __xen_guest
.ascii "GUEST_OS=linux,GUEST_VER=2.6"
- .ascii ",XEN_VER=2.0"
+ .ascii ",XEN_VER=3.0"
.ascii ",VIRT_BASE=0xC0000000"
.ascii ",LOADER=generic"
- .ascii ",PT_MODE_WRITABLE"
.byte 0
.text
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/io_apic.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/io_apic.c
new file mode 100644
index 0000000000..882ff3fe9c
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/io_apic.c
@@ -0,0 +1,2611 @@
+/*
+ * Intel IO-APIC support for multi-Pentium hosts.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
+ *
+ * Many thanks to Stig Venaas for trying out countless experimental
+ * patches and reporting/debugging problems patiently!
+ *
+ * (c) 1999, Multiple IO-APIC support, developed by
+ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
+ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
+ * further tested and cleaned up by Zach Brown <zab@redhat.com>
+ * and Ingo Molnar <mingo@redhat.com>
+ *
+ * Fixes
+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
+ * thanks to Eric Gilmore
+ * and Rolf G. Tews
+ * for testing these extensively
+ * Paul Diefenbaugh : Added full ACPI support
+ */
+
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/config.h>
+#include <linux/smp_lock.h>
+#include <linux/mc146818rtc.h>
+#include <linux/compiler.h>
+#include <linux/acpi.h>
+
+#include <linux/sysdev.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/desc.h>
+#include <asm/timer.h>
+
+#include <mach_apic.h>
+
+#include "io_ports.h"
+
+#ifdef CONFIG_XEN
+
+#include <asm-xen/xen-public/xen.h>
+#include <asm-xen/xen-public/physdev.h>
+
+/*
+ * Fake i8259: there is no PIC to program under Xen. "Routing" an IRQ to
+ * the 8259 just clears its bit in io_apic_irqs, and such an IRQ is never
+ * masked or pending there.
+ */
+#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
+#define disable_8259A_irq(_irq) ((void)0)
+#define i8259A_irq_pending(_irq) (0)
+
+unsigned long io_apic_irqs;
+
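+/*
+ * IO-APIC register accesses are proxied through the hypervisor via
+ * the PHYSDEVOP_APIC_READ/PHYSDEVOP_APIC_WRITE physdev operations.
+ */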
+static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
+{
+ physdev_op_t op;
+ int ret;
+
+ op.cmd = PHYSDEVOP_APIC_READ;
+ op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
+ op.u.apic_op.offset = reg;
+ ret = HYPERVISOR_physdev_op(&op);
+ if (ret)
+ return ret;
+ return op.u.apic_op.value;
+}
+
+static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ physdev_op_t op;
+
+ op.cmd = PHYSDEVOP_APIC_WRITE;
+ op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
+ op.u.apic_op.offset = reg;
+ op.u.apic_op.value = value;
+ HYPERVISOR_physdev_op(&op);
+}
+
+#define io_apic_read(a,r) xen_io_apic_read(a,r)
+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
+
+#endif /* CONFIG_XEN */
+
+int (*ioapic_renumber_irq)(int ioapic, int irq);
+atomic_t irq_mis_count;
+
+static DEFINE_SPINLOCK(ioapic_lock);
+
+/*
+ * Is the SiS APIC rmw bug present?
+ * -1 = don't know, 0 = no, 1 = yes
+ */
+int sis_apic_bug = -1;
+
+/*
+ * # of IRQ routing registers
+ */
+int nr_ioapic_registers[MAX_IO_APICS];
+
+/*
+ * Rough estimate of how many shared IRQs there are; it can
+ * be changed at any time.
+ */
+#define MAX_PLUS_SHARED_IRQS NR_IRQS
+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
+
+/*
+ * This is performance-critical, we want to do it O(1)
+ *
+ * the indexing order of this array favors 1:1 mappings
+ * between pins and IRQs.
+ */
+
+static struct irq_pin_list {
+ int apic, pin, next;
+} irq_2_pin[PIN_MAP_SIZE];
+
+int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
+#ifdef CONFIG_PCI_MSI
+#define vector_to_irq(vector) \
+ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
+#else
+#define vector_to_irq(vector) (vector)
+#endif
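+/* With CONFIG_PCI_MSI, non-legacy vectors are translated through
+ * vector_irq[]; otherwise vector and IRQ numbers are identical. */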
+
+/*
+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
+ * shared ISA-space IRQs, so we have to support them. We are super
+ * fast in the common case, and fast for shared ISA-space IRQs.
+ */
+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
+{
+ static int first_free_entry = NR_IRQS;
+ struct irq_pin_list *entry = irq_2_pin + irq;
+
+ while (entry->next)
+ entry = irq_2_pin + entry->next;
+
+ if (entry->pin != -1) {
+ entry->next = first_free_entry;
+ entry = irq_2_pin + entry->next;
+ if (++first_free_entry >= PIN_MAP_SIZE)
+ panic("io_apic.c: whoops");
+ }
+ entry->apic = apic;
+ entry->pin = pin;
+}
+
+#ifndef CONFIG_XEN
+/*
+ * Reroute an IRQ to a different pin.
+ */
+static void __init replace_pin_at_irq(unsigned int irq,
+ int oldapic, int oldpin,
+ int newapic, int newpin)
+{
+ struct irq_pin_list *entry = irq_2_pin + irq;
+
+ while (1) {
+ if (entry->apic == oldapic && entry->pin == oldpin) {
+ entry->apic = newapic;
+ entry->pin = newpin;
+ }
+ if (!entry->next)
+ break;
+ entry = irq_2_pin + entry->next;
+ }
+}
+
+static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
+{
+ struct irq_pin_list *entry = irq_2_pin + irq;
+ unsigned int pin, reg;
+
+ for (;;) {
+ pin = entry->pin;
+ if (pin == -1)
+ break;
+ reg = io_apic_read(entry->apic, 0x10 + pin*2);
+ reg &= ~disable;
+ reg |= enable;
+ io_apic_modify(entry->apic, 0x10 + pin*2, reg);
+ if (!entry->next)
+ break;
+ entry = irq_2_pin + entry->next;
+ }
+}
+
+/* mask = 1 */
+static void __mask_IO_APIC_irq (unsigned int irq)
+{
+ __modify_IO_APIC_irq(irq, 0x00010000, 0);
+}
+
+/* mask = 0 */
+static void __unmask_IO_APIC_irq (unsigned int irq)
+{
+ __modify_IO_APIC_irq(irq, 0, 0x00010000);
+}
+
+/* mask = 1, trigger = 0 */
+static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
+{
+ __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
+}
+
+/* mask = 0, trigger = 1 */
+static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
+{
+ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
+}
+
+static void mask_IO_APIC_irq (unsigned int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __mask_IO_APIC_irq(irq);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+static void unmask_IO_APIC_irq (unsigned int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __unmask_IO_APIC_irq(irq);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
+{
+ struct IO_APIC_route_entry entry;
+ unsigned long flags;
+
+ /* Check delivery_mode to be sure we're not clearing an SMI pin */
+ spin_lock_irqsave(&ioapic_lock, flags);
+ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
+ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ if (entry.delivery_mode == dest_SMI)
+ return;
+
+ /*
+ * Disable it in the IO-APIC irq-routing table:
+ */
+ memset(&entry, 0, sizeof(entry));
+ entry.mask = 1;
+ spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+static void clear_IO_APIC (void)
+{
+ int apic, pin;
+
+ for (apic = 0; apic < nr_ioapics; apic++)
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
+ clear_IO_APIC_pin(apic, pin);
+}
+
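+/*
+ * Write the destination APIC ID into the high dword of every
+ * redirection table entry routed to this IRQ.
+ */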
+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
+{
+ unsigned long flags;
+ int pin;
+ struct irq_pin_list *entry = irq_2_pin + irq;
+ unsigned int apicid_value;
+
+ apicid_value = cpu_mask_to_apicid(cpumask);
+ /* Prepare to do the io_apic_write */
+ apicid_value = apicid_value << 24;
+ spin_lock_irqsave(&ioapic_lock, flags);
+ for (;;) {
+ pin = entry->pin;
+ if (pin == -1)
+ break;
+ io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
+ if (!entry->next)
+ break;
+ entry = irq_2_pin + entry->next;
+ }
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+#else
+#define clear_IO_APIC() ((void)0)
+#endif
+
+#if defined(CONFIG_IRQBALANCE)
+# include <asm/processor.h> /* kernel_thread() */
+# include <linux/kernel_stat.h> /* kstat */
+# include <linux/slab.h> /* kmalloc() */
+# include <linux/timer.h> /* time_after() */
+
+# ifdef CONFIG_BALANCED_IRQ_DEBUG
+# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
+# define Dprintk(x...) do { TDprintk(x); } while (0)
+# else
+# define TDprintk(x...)
+# define Dprintk(x...)
+# endif
+
+cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS];
+
+#define IRQBALANCE_CHECK_ARCH -999
+static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
+static int physical_balance = 0;
+
+struct irq_cpu_info {
+ unsigned long * last_irq;
+ unsigned long * irq_delta;
+ unsigned long irq;
+} irq_cpu_data[NR_CPUS];
+
+#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
+#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq])
+#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq])
+
+#define IDLE_ENOUGH(cpu,now) \
+ (idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > 1))
+
+#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
+
+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
+
+#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
+#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
+#define BALANCED_IRQ_MORE_DELTA (HZ/10)
+#define BALANCED_IRQ_LESS_DELTA (HZ)
+
+long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL;
+
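+/*
+ * Walk the CPUs circularly in the given direction and return the next
+ * online CPU in allowed_mask, preferring CPUs that are idle enough;
+ * after one full lap the idleness requirement is dropped.
+ */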
+static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
+ unsigned long now, int direction)
+{
+ int search_idle = 1;
+ int cpu = curr_cpu;
+
+ goto inside;
+
+ do {
+ if (unlikely(cpu == curr_cpu))
+ search_idle = 0;
+inside:
+ if (direction == 1) {
+ cpu++;
+ if (cpu >= NR_CPUS)
+ cpu = 0;
+ } else {
+ cpu--;
+ if (cpu == -1)
+ cpu = NR_CPUS-1;
+ }
+ } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
+ (search_idle && !IDLE_ENOUGH(cpu,now)));
+
+ return cpu;
+}
+
+static inline void balance_irq(int cpu, int irq)
+{
+ unsigned long now = jiffies;
+ cpumask_t allowed_mask;
+ unsigned int new_cpu;
+
+ if (irqbalance_disabled)
+ return;
+
+ cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
+ new_cpu = move(cpu, allowed_mask, now, 1);
+ if (cpu != new_cpu) {
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ pending_irq_balance_cpumask[irq] = cpumask_of_cpu(new_cpu);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
+{
+ int i, j;
+ Dprintk("Rotating IRQs among CPUs.\n");
+ for (i = 0; i < NR_CPUS; i++) {
+ for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
+ if (!irq_desc[j].action)
+ continue;
+ /* Is it a significant load ? */
+ if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
+ useful_load_threshold)
+ continue;
+ balance_irq(i, j);
+ }
+ }
+ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
+ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
+ return;
+}
+
+static void do_irq_balance(void)
+{
+ int i, j;
+ unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
+ unsigned long move_this_load = 0;
+ int max_loaded = 0, min_loaded = 0;
+ int load;
+ unsigned long useful_load_threshold = balanced_irq_interval + 10;
+ int selected_irq;
+ int tmp_loaded, first_attempt = 1;
+ unsigned long tmp_cpu_irq;
+ unsigned long imbalance = 0;
+ cpumask_t allowed_mask, target_cpu_mask, tmp;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ int package_index;
+ CPU_IRQ(i) = 0;
+ if (!cpu_online(i))
+ continue;
+ package_index = CPU_TO_PACKAGEINDEX(i);
+ for (j = 0; j < NR_IRQS; j++) {
+ unsigned long value_now, delta;
+ /* Is this an active IRQ? */
+ if (!irq_desc[j].action)
+ continue;
+ if ( package_index == i )
+ IRQ_DELTA(package_index,j) = 0;
+ /* Determine the total count per processor per IRQ */
+ value_now = (unsigned long) kstat_cpu(i).irqs[j];
+
+ /* Determine the activity per processor per IRQ */
+ delta = value_now - LAST_CPU_IRQ(i,j);
+
+ /* Update last_cpu_irq[][] for the next time */
+ LAST_CPU_IRQ(i,j) = value_now;
+
+ /* Ignore IRQs whose rate is less than the clock */
+ if (delta < useful_load_threshold)
+ continue;
+ /* update the load for the processor or package total */
+ IRQ_DELTA(package_index,j) += delta;
+
+ /* Keep track of the higher numbered sibling as well */
+ if (i != package_index)
+ CPU_IRQ(i) += delta;
+ /*
+ * We have sibling A and sibling B in the package
+ *
+ * cpu_irq[A] = load for cpu A + load for cpu B
+ * cpu_irq[B] = load for cpu B
+ */
+ CPU_IRQ(package_index) += delta;
+ }
+ }
+ /* Find the least loaded processor package */
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_online(i))
+ continue;
+ if (i != CPU_TO_PACKAGEINDEX(i))
+ continue;
+ if (min_cpu_irq > CPU_IRQ(i)) {
+ min_cpu_irq = CPU_IRQ(i);
+ min_loaded = i;
+ }
+ }
+ max_cpu_irq = ULONG_MAX;
+
+tryanothercpu:
+ /* Look for heaviest loaded processor.
+ * We may come back to get the next heaviest loaded processor.
+ * Skip processors with trivial loads.
+ */
+ tmp_cpu_irq = 0;
+ tmp_loaded = -1;
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_online(i))
+ continue;
+ if (i != CPU_TO_PACKAGEINDEX(i))
+ continue;
+ if (max_cpu_irq <= CPU_IRQ(i))
+ continue;
+ if (tmp_cpu_irq < CPU_IRQ(i)) {
+ tmp_cpu_irq = CPU_IRQ(i);
+ tmp_loaded = i;
+ }
+ }
+
+ if (tmp_loaded == -1) {
+		/* In the case of a small number of heavy interrupt sources,
+		 * a few cpus can end up loaded too much.  We use Ingo's
+		 * original approach to rotate them around.
+		 */
+ if (!first_attempt && imbalance >= useful_load_threshold) {
+ rotate_irqs_among_cpus(useful_load_threshold);
+ return;
+ }
+ goto not_worth_the_effort;
+ }
+
+ first_attempt = 0; /* heaviest search */
+ max_cpu_irq = tmp_cpu_irq; /* load */
+ max_loaded = tmp_loaded; /* processor */
+ imbalance = (max_cpu_irq - min_cpu_irq) / 2;
+
+ Dprintk("max_loaded cpu = %d\n", max_loaded);
+ Dprintk("min_loaded cpu = %d\n", min_loaded);
+ Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
+ Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
+ Dprintk("load imbalance = %lu\n", imbalance);
+
+	/* If the imbalance is less than roughly an eighth (12.5%) of the
+	 * max load, we are into diminishing returns - quit.
+	 */
+ if (imbalance < (max_cpu_irq >> 3)) {
+ Dprintk("Imbalance too trivial\n");
+ goto not_worth_the_effort;
+ }
+
+tryanotherirq:
+ /* if we select an IRQ to move that can't go where we want, then
+ * see if there is another one to try.
+ */
+ move_this_load = 0;
+ selected_irq = -1;
+ for (j = 0; j < NR_IRQS; j++) {
+ /* Is this an active IRQ? */
+ if (!irq_desc[j].action)
+ continue;
+ if (imbalance <= IRQ_DELTA(max_loaded,j))
+ continue;
+ /* Try to find the IRQ that is closest to the imbalance
+ * without going over.
+ */
+ if (move_this_load < IRQ_DELTA(max_loaded,j)) {
+ move_this_load = IRQ_DELTA(max_loaded,j);
+ selected_irq = j;
+ }
+ }
+ if (selected_irq == -1) {
+ goto tryanothercpu;
+ }
+
+ imbalance = move_this_load;
+
+	/* For the physical_balance case, we accumulated both load
+	 * values in one of the siblings' cpu_irq[], so that we can
+	 * use the same code for physical and logical processors
+	 * as much as possible.
+ *
+ * NOTE: the cpu_irq[] array holds the sum of the load for
+ * sibling A and sibling B in the slot for the lowest numbered
+ * sibling (A), _AND_ the load for sibling B in the slot for
+ * the higher numbered sibling.
+ *
+ * We seek the least loaded sibling by making the comparison
+ * (A+B)/2 vs B
+ */
+ load = CPU_IRQ(min_loaded) >> 1;
+ for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
+ if (load > CPU_IRQ(j)) {
+ /* This won't change cpu_sibling_map[min_loaded] */
+ load = CPU_IRQ(j);
+ min_loaded = j;
+ }
+ }
+
+ cpus_and(allowed_mask, cpu_online_map, irq_affinity[selected_irq]);
+ target_cpu_mask = cpumask_of_cpu(min_loaded);
+ cpus_and(tmp, target_cpu_mask, allowed_mask);
+
+ if (!cpus_empty(tmp)) {
+ irq_desc_t *desc = irq_desc + selected_irq;
+ unsigned long flags;
+
+ Dprintk("irq = %d moved to cpu = %d\n",
+ selected_irq, min_loaded);
+		/* mark the new destination; move_irq() applies it later */
+ spin_lock_irqsave(&desc->lock, flags);
+ pending_irq_balance_cpumask[selected_irq] =
+ cpumask_of_cpu(min_loaded);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ /* Since we made a change, come back sooner to
+ * check for more variation.
+ */
+ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
+ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
+ return;
+ }
+ goto tryanotherirq;
+
+not_worth_the_effort:
+ /*
+ * if we did not find an IRQ to move, then adjust the time interval
+ * upward
+ */
+ balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
+ balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
+ Dprintk("IRQ worth rotating not found\n");
+ return;
+}
+
+static int balanced_irq(void *unused)
+{
+ int i;
+ unsigned long prev_balance_time = jiffies;
+ long time_remaining = balanced_irq_interval;
+
+ daemonize("kirqd");
+
+ /* push everything to CPU 0 to give us a starting point. */
+	for (i = 0; i < NR_IRQS; i++) {
+ pending_irq_balance_cpumask[i] = cpumask_of_cpu(0);
+ }
+
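+	/*
+	 * Main loop: sleep for the adaptive balancing interval, then
+	 * redistribute the IRQ load across CPUs.  The interval shrinks
+	 * after a successful move and grows when nothing is worth moving.
+	 */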
+ for ( ; ; ) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ time_remaining = schedule_timeout(time_remaining);
+ try_to_freeze(PF_FREEZE);
+ if (time_after(jiffies,
+ prev_balance_time+balanced_irq_interval)) {
+ do_irq_balance();
+ prev_balance_time = jiffies;
+ time_remaining = balanced_irq_interval;
+ }
+ }
+ return 0;
+}
+
+static int __init balanced_irq_init(void)
+{
+ int i;
+ struct cpuinfo_x86 *c;
+ cpumask_t tmp;
+
+ cpus_shift_right(tmp, cpu_online_map, 2);
+ c = &boot_cpu_data;
+	/* When not overridden on the command line, ask the subarchitecture. */
+ if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
+ irqbalance_disabled = NO_BALANCE_IRQ;
+ if (irqbalance_disabled)
+ return 0;
+
+ /* disable irqbalance completely if there is only one processor online */
+ if (num_online_cpus() < 2) {
+ irqbalance_disabled = 1;
+ return 0;
+ }
+ /*
+ * Enable physical balance only if more than 1 physical processor
+ * is present
+ */
+ if (smp_num_siblings > 1 && !cpus_empty(tmp))
+ physical_balance = 1;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_online(i))
+ continue;
+ irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
+ irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
+ if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
+ printk(KERN_ERR "balanced_irq_init: out of memory");
+ goto failed;
+ }
+ memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
+ memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
+ }
+
+ printk(KERN_INFO "Starting balanced_irq\n");
+ if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0)
+ return 0;
+ else
+ printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
+failed:
+ for (i = 0; i < NR_CPUS; i++) {
+		if (irq_cpu_data[i].irq_delta)
+			kfree(irq_cpu_data[i].irq_delta);
+		if (irq_cpu_data[i].last_irq)
+			kfree(irq_cpu_data[i].last_irq);
+ }
+ return 0;
+}
+
+int __init irqbalance_disable(char *str)
+{
+ irqbalance_disabled = 1;
+ return 0;
+}
+
+__setup("noirqbalance", irqbalance_disable);
+
+static inline void move_irq(int irq)
+{
+ /* note - we hold the desc->lock */
+ if (unlikely(!cpus_empty(pending_irq_balance_cpumask[irq]))) {
+ set_ioapic_affinity_irq(irq, pending_irq_balance_cpumask[irq]);
+ cpus_clear(pending_irq_balance_cpumask[irq]);
+ }
+}
+
+late_initcall(balanced_irq_init);
+
+#else /* !CONFIG_IRQBALANCE */
+static inline void move_irq(int irq) { }
+#endif /* CONFIG_IRQBALANCE */
+
+#ifndef CONFIG_SMP
+void fastcall send_IPI_self(int vector)
+{
+#ifndef CONFIG_XEN
+ unsigned int cfg;
+
+ /*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
+ cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
+ /*
+ * Send the IPI. The write to APIC_ICR fires this off.
+ */
+ apic_write_around(APIC_ICR, cfg);
+#endif
+}
+#endif /* !CONFIG_SMP */
+
+
+/*
+ * Support for broken MP BIOSes: enables hand-redirection of PIRQ0-7 to
+ * specific CPU-side IRQs.
+ */
+
+#define MAX_PIRQS 8
+int pirq_entries [MAX_PIRQS];
+int pirqs_enabled;
+int skip_ioapic_setup;
+
+static int __init ioapic_setup(char *str)
+{
+ skip_ioapic_setup = 1;
+ return 1;
+}
+
+__setup("noapic", ioapic_setup);
+
+static int __init ioapic_pirq_setup(char *str)
+{
+ int i, max;
+ int ints[MAX_PIRQS+1];
+
+ get_options(str, ARRAY_SIZE(ints), ints);
+
+ for (i = 0; i < MAX_PIRQS; i++)
+ pirq_entries[i] = -1;
+
+ pirqs_enabled = 1;
+ apic_printk(APIC_VERBOSE, KERN_INFO
+ "PIRQ redirection, working around broken MP-BIOS.\n");
+ max = MAX_PIRQS;
+ if (ints[0] < MAX_PIRQS)
+ max = ints[0];
+
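+	/*
+	 * Entries fill from the top: with e.g. "pirq=5,11" (illustrative
+	 * values) PIRQ7 is redirected to IRQ 5 and PIRQ6 to IRQ 11.
+	 */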
+ for (i = 0; i < max; i++) {
+ apic_printk(APIC_VERBOSE, KERN_DEBUG
+ "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
+ /*
+ * PIRQs are mapped upside down, usually.
+ */
+ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
+ }
+ return 1;
+}
+
+__setup("pirq=", ioapic_pirq_setup);
+
+/*
+ * Find the IRQ entry number of a certain pin.
+ */
+static int find_irq_entry(int apic, int pin, int type)
+{
+ int i;
+
+ for (i = 0; i < mp_irq_entries; i++)
+ if (mp_irqs[i].mpc_irqtype == type &&
+ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
+ mp_irqs[i].mpc_dstirq == pin)
+ return i;
+
+ return -1;
+}
+
+#ifndef CONFIG_XEN
+/*
+ * Find the pin to which IRQ[irq] (ISA) is connected
+ */
+static int find_isa_irq_pin(int irq, int type)
+{
+ int i;
+
+ for (i = 0; i < mp_irq_entries; i++) {
+ int lbus = mp_irqs[i].mpc_srcbus;
+
+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
+ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
+ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
+ ) &&
+ (mp_irqs[i].mpc_irqtype == type) &&
+ (mp_irqs[i].mpc_srcbusirq == irq))
+
+ return mp_irqs[i].mpc_dstirq;
+ }
+ return -1;
+}
+#endif
+
+/*
+ * Find a specific PCI IRQ entry.
+ * Not an __init, possibly needed by modules
+ */
+static int pin_2_irq(int idx, int apic, int pin);
+
+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
+{
+ int apic, i, best_guess = -1;
+
+ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
+ "slot:%d, pin:%d.\n", bus, slot, pin);
+ if (mp_bus_id_to_pci_bus[bus] == -1) {
+ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
+ return -1;
+ }
+ for (i = 0; i < mp_irq_entries; i++) {
+ int lbus = mp_irqs[i].mpc_srcbus;
+
+ for (apic = 0; apic < nr_ioapics; apic++)
+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
+ break;
+
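+		/*
+		 * For PCI sources, mpc_srcbusirq encodes the device number
+		 * in bits 2-6 and the INTA-INTD pin in bits 0-1, hence the
+		 * shift and mask below.
+		 */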
+ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
+ !mp_irqs[i].mpc_irqtype &&
+ (bus == lbus) &&
+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
+ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
+
+ if (!(apic || IO_APIC_IRQ(irq)))
+ continue;
+
+ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
+ return irq;
+ /*
+ * Use the first all-but-pin matching entry as a
+ * best-guess fuzzy result for broken mptables.
+ */
+ if (best_guess < 0)
+ best_guess = irq;
+ }
+ }
+ return best_guess;
+}
+
+#ifndef CONFIG_XEN
+/*
+ * This function currently is only a helper for the i386 smp boot process where
 * we need to reprogram the ioredtbls to cater for the cpus which have come online,
 * so the mask in all cases should simply be TARGET_CPUS.
+ */
+void __init setup_ioapic_dest(void)
+{
+ int pin, ioapic, irq, irq_entry;
+
+ if (skip_ioapic_setup == 1)
+ return;
+
+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
+ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
+ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
+ if (irq_entry == -1)
+ continue;
+ irq = pin_2_irq(irq_entry, ioapic, pin);
+ set_ioapic_affinity_irq(irq, TARGET_CPUS);
+ }
+
+ }
+}
+#endif /* !CONFIG_XEN */
+
+/*
+ * EISA Edge/Level control register, ELCR
+ */
+static int EISA_ELCR(unsigned int irq)
+{
+ if (irq < 16) {
+ unsigned int port = 0x4d0 + (irq >> 3);
+ return (inb(port) >> (irq & 7)) & 1;
+ }
+ apic_printk(APIC_VERBOSE, KERN_INFO
+ "Broken MPtable reports ISA irq %d\n", irq);
+ return 0;
+}
+
+/* EISA interrupts are always polarity zero and can be edge or level
+ * trigger depending on the ELCR value. If an interrupt is listed as
+ * EISA conforming in the MP table, that means its trigger type must
+ * be read in from the ELCR */
+
+#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
+#define default_EISA_polarity(idx) (0)
+
+/* ISA interrupts are always polarity zero edge triggered,
+ * when listed as conforming in the MP table. */
+
+#define default_ISA_trigger(idx) (0)
+#define default_ISA_polarity(idx) (0)
+
+/* PCI interrupts are always polarity one level triggered,
+ * when listed as conforming in the MP table. */
+
+#define default_PCI_trigger(idx) (1)
+#define default_PCI_polarity(idx) (1)
+
+/* MCA interrupts are always polarity zero level triggered,
+ * when listed as conforming in the MP table. */
+
+#define default_MCA_trigger(idx) (1)
+#define default_MCA_polarity(idx) (0)
+
+/* NEC98 interrupts are always polarity zero edge triggered,
+ * when listed as conforming in the MP table. */
+
+#define default_NEC98_trigger(idx) (0)
+#define default_NEC98_polarity(idx) (0)
+
+static int __init MPBIOS_polarity(int idx)
+{
+ int bus = mp_irqs[idx].mpc_srcbus;
+ int polarity;
+
+ /*
+ * Determine IRQ line polarity (high active or low active):
+ */
+ switch (mp_irqs[idx].mpc_irqflag & 3)
+ {
+ case 0: /* conforms, ie. bus-type dependent polarity */
+ {
+ switch (mp_bus_id_to_type[bus])
+ {
+ case MP_BUS_ISA: /* ISA pin */
+ {
+ polarity = default_ISA_polarity(idx);
+ break;
+ }
+ case MP_BUS_EISA: /* EISA pin */
+ {
+ polarity = default_EISA_polarity(idx);
+ break;
+ }
+ case MP_BUS_PCI: /* PCI pin */
+ {
+ polarity = default_PCI_polarity(idx);
+ break;
+ }
+ case MP_BUS_MCA: /* MCA pin */
+ {
+ polarity = default_MCA_polarity(idx);
+ break;
+ }
+ case MP_BUS_NEC98: /* NEC 98 pin */
+ {
+ polarity = default_NEC98_polarity(idx);
+ break;
+ }
+ default:
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+ polarity = 1;
+ break;
+ }
+ }
+ break;
+ }
+ case 1: /* high active */
+ {
+ polarity = 0;
+ break;
+ }
+ case 2: /* reserved */
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+ polarity = 1;
+ break;
+ }
+ case 3: /* low active */
+ {
+ polarity = 1;
+ break;
+ }
+ default: /* invalid */
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+ polarity = 1;
+ break;
+ }
+ }
+ return polarity;
+}
+
+static int MPBIOS_trigger(int idx)
+{
+ int bus = mp_irqs[idx].mpc_srcbus;
+ int trigger;
+
+ /*
+ * Determine IRQ trigger mode (edge or level sensitive):
+ */
+ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
+ {
+ case 0: /* conforms, ie. bus-type dependent */
+ {
+ switch (mp_bus_id_to_type[bus])
+ {
+ case MP_BUS_ISA: /* ISA pin */
+ {
+ trigger = default_ISA_trigger(idx);
+ break;
+ }
+ case MP_BUS_EISA: /* EISA pin */
+ {
+ trigger = default_EISA_trigger(idx);
+ break;
+ }
+ case MP_BUS_PCI: /* PCI pin */
+ {
+ trigger = default_PCI_trigger(idx);
+ break;
+ }
+ case MP_BUS_MCA: /* MCA pin */
+ {
+ trigger = default_MCA_trigger(idx);
+ break;
+ }
+ case MP_BUS_NEC98: /* NEC 98 pin */
+ {
+ trigger = default_NEC98_trigger(idx);
+ break;
+ }
+ default:
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+ trigger = 1;
+ break;
+ }
+ }
+ break;
+ }
+ case 1: /* edge */
+ {
+ trigger = 0;
+ break;
+ }
+ case 2: /* reserved */
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+ trigger = 1;
+ break;
+ }
+ case 3: /* level */
+ {
+ trigger = 1;
+ break;
+ }
+ default: /* invalid */
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+ trigger = 0;
+ break;
+ }
+ }
+ return trigger;
+}
+
+static inline int irq_polarity(int idx)
+{
+ return MPBIOS_polarity(idx);
+}
+
+static inline int irq_trigger(int idx)
+{
+ return MPBIOS_trigger(idx);
+}
+
+static int pin_2_irq(int idx, int apic, int pin)
+{
+ int irq, i;
+ int bus = mp_irqs[idx].mpc_srcbus;
+
+ /*
+ * Debugging check, we are in big trouble if this message pops up!
+ */
+ if (mp_irqs[idx].mpc_dstirq != pin)
+ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
+
+ switch (mp_bus_id_to_type[bus])
+ {
+ case MP_BUS_ISA: /* ISA pin */
+ case MP_BUS_EISA:
+ case MP_BUS_MCA:
+ case MP_BUS_NEC98:
+ {
+ irq = mp_irqs[idx].mpc_srcbusirq;
+ break;
+ }
+ case MP_BUS_PCI: /* PCI pin */
+ {
+ /*
+ * PCI IRQs are mapped in order
+ */
+ i = irq = 0;
+ while (i < apic)
+ irq += nr_ioapic_registers[i++];
+ irq += pin;
+
+ /*
+ * For MPS mode, so far only needed by ES7000 platform
+ */
+ if (ioapic_renumber_irq)
+ irq = ioapic_renumber_irq(apic, irq);
+
+ break;
+ }
+ default:
+ {
+ printk(KERN_ERR "unknown bus type %d.\n",bus);
+ irq = 0;
+ break;
+ }
+ }
+
+ /*
+ * PCI IRQ command line redirection. Yes, limits are hardcoded.
+ */
+ if ((pin >= 16) && (pin <= 23)) {
+ if (pirq_entries[pin-16] != -1) {
+ if (!pirq_entries[pin-16]) {
+ apic_printk(APIC_VERBOSE, KERN_DEBUG
+ "disabling PIRQ%d\n", pin-16);
+ } else {
+ irq = pirq_entries[pin-16];
+ apic_printk(APIC_VERBOSE, KERN_DEBUG
+ "using PIRQ%d -> IRQ %d\n",
+ pin-16, irq);
+ }
+ }
+ }
+ return irq;
+}
+
+static inline int IO_APIC_irq_trigger(int irq)
+{
+ int apic, idx, pin;
+
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+ idx = find_irq_entry(apic,pin,mp_INT);
+ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
+ return irq_trigger(idx);
+ }
+ }
+ /*
+ * nonexistent IRQs are edge default
+ */
+ return 0;
+}
+
+/* irq_vector is indexed by the sum of all RTEs in all I/O APICs. */
+u8 irq_vector[NR_IRQ_VECTORS]; /* = { FIRST_DEVICE_VECTOR , 0 }; */
+
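+/*
+ * Under Xen the vector is allocated by the hypervisor via
+ * PHYSDEVOP_ASSIGN_VECTOR rather than being chosen locally; we only
+ * record the vector the hypervisor hands back.
+ */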
+int assign_irq_vector(int irq)
+{
+ static int current_vector = FIRST_DEVICE_VECTOR;
+ physdev_op_t op;
+
+ BUG_ON(irq >= NR_IRQ_VECTORS);
+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
+ return IO_APIC_VECTOR(irq);
+
+ op.cmd = PHYSDEVOP_ASSIGN_VECTOR;
+ op.u.irq_op.irq = irq;
+ if (HYPERVISOR_physdev_op(&op))
+ return -ENOSPC;
+ current_vector = op.u.irq_op.vector;
+
+ vector_irq[current_vector] = irq;
+ if (irq != AUTO_ASSIGN)
+ IO_APIC_VECTOR(irq) = current_vector;
+
+ return current_vector;
+}
+
+#ifndef CONFIG_XEN
+static struct hw_interrupt_type ioapic_level_type;
+static struct hw_interrupt_type ioapic_edge_type;
+
+#define IOAPIC_AUTO -1
+#define IOAPIC_EDGE 0
+#define IOAPIC_LEVEL 1
+
+static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+{
+ if (use_pci_vector() && !platform_legacy_irq(irq)) {
+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+ trigger == IOAPIC_LEVEL)
+ irq_desc[vector].handler = &ioapic_level_type;
+ else
+ irq_desc[vector].handler = &ioapic_edge_type;
+ set_intr_gate(vector, interrupt[vector]);
+ } else {
+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+ trigger == IOAPIC_LEVEL)
+ irq_desc[irq].handler = &ioapic_level_type;
+ else
+ irq_desc[irq].handler = &ioapic_edge_type;
+ set_intr_gate(vector, interrupt[irq]);
+ }
+}
+#else
+#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
+#endif
+
+void __init setup_IO_APIC_irqs(void)
+{
+ struct IO_APIC_route_entry entry;
+ int apic, pin, idx, irq, first_notcon = 1, vector;
+ unsigned long flags;
+
+ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
+
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+
+ /*
+ * add it to the IO-APIC irq-routing table:
+ */
+ memset(&entry,0,sizeof(entry));
+
+ entry.delivery_mode = INT_DELIVERY_MODE;
+ entry.dest_mode = INT_DEST_MODE;
+ entry.mask = 0; /* enable IRQ */
+ entry.dest.logical.logical_dest =
+ cpu_mask_to_apicid(TARGET_CPUS);
+
+ idx = find_irq_entry(apic,pin,mp_INT);
+ if (idx == -1) {
+ if (first_notcon) {
+ apic_printk(APIC_VERBOSE, KERN_DEBUG
+ " IO-APIC (apicid-pin) %d-%d",
+ mp_ioapics[apic].mpc_apicid,
+ pin);
+ first_notcon = 0;
+ } else
+ apic_printk(APIC_VERBOSE, ", %d-%d",
+ mp_ioapics[apic].mpc_apicid, pin);
+ continue;
+ }
+
+ entry.trigger = irq_trigger(idx);
+ entry.polarity = irq_polarity(idx);
+
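+			/*
+			 * Level-triggered entries are written masked here and
+			 * are expected to be unmasked when the IRQ is started
+			 * up (see startup_level_ioapic_irq).
+			 */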
+ if (irq_trigger(idx)) {
+ entry.trigger = 1;
+ entry.mask = 1;
+ }
+
+ irq = pin_2_irq(idx, apic, pin);
+ /*
+ * skip adding the timer int on secondary nodes, which causes
+ * a small but painful rift in the time-space continuum
+ */
+ if (multi_timer_check(apic, irq))
+ continue;
+ else
+ add_pin_to_irq(irq, apic, pin);
+
+ if (/*!apic &&*/ !IO_APIC_IRQ(irq))
+ continue;
+
+ if (IO_APIC_IRQ(irq)) {
+ vector = assign_irq_vector(irq);
+ entry.vector = vector;
+ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
+
+ if (!apic && (irq < 16))
+ disable_8259A_irq(irq);
+ }
+ spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+ }
+
+ if (!first_notcon)
+ apic_printk(APIC_VERBOSE, " not connected.\n");
+}
+
+/*
+ * Set up the 8259A-master output pin:
+ */
+#ifndef CONFIG_XEN
+void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
+{
+ struct IO_APIC_route_entry entry;
+ unsigned long flags;
+
+ memset(&entry,0,sizeof(entry));
+
+ disable_8259A_irq(0);
+
+ /* mask LVT0 */
+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
+
+ /*
+ * We use logical delivery to get the timer IRQ
+ * to the first CPU.
+ */
+ entry.dest_mode = INT_DEST_MODE;
+ entry.mask = 0; /* unmask IRQ now */
+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+ entry.delivery_mode = INT_DELIVERY_MODE;
+ entry.polarity = 0;
+ entry.trigger = 0;
+ entry.vector = vector;
+
+ /*
+ * The timer IRQ doesn't have to know that behind the
+ * scene we have a 8259A-master in AEOI mode ...
+ */
+ irq_desc[0].handler = &ioapic_edge_type;
+
+ /*
+ * Add it to the IO-APIC irq-routing table:
+ */
+ spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
+ io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ enable_8259A_irq(0);
+}
+
+static inline void UNEXPECTED_IO_APIC(void)
+{
+}
+
+void __init print_IO_APIC(void)
+{
+ int apic, i;
+ union IO_APIC_reg_00 reg_00;
+ union IO_APIC_reg_01 reg_01;
+ union IO_APIC_reg_02 reg_02;
+ union IO_APIC_reg_03 reg_03;
+ unsigned long flags;
+
+ if (apic_verbosity == APIC_QUIET)
+ return;
+
+ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
+ for (i = 0; i < nr_ioapics; i++)
+ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
+ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
+
+ /*
+ * We are a bit conservative about what we expect. We have to
+ * know about every hardware change ASAP.
+ */
+ printk(KERN_INFO "testing the IO APIC.......................\n");
+
+ for (apic = 0; apic < nr_ioapics; apic++) {
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ reg_00.raw = io_apic_read(apic, 0);
+ reg_01.raw = io_apic_read(apic, 1);
+ if (reg_01.bits.version >= 0x10)
+ reg_02.raw = io_apic_read(apic, 2);
+ if (reg_01.bits.version >= 0x20)
+ reg_03.raw = io_apic_read(apic, 3);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
+ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
+ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
+ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
+ printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
+ if (reg_00.bits.ID >= get_physical_broadcast())
+ UNEXPECTED_IO_APIC();
+ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
+ UNEXPECTED_IO_APIC();
+
+ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
+ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
+ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
+ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
+ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
+ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
+ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
+ (reg_01.bits.entries != 0x2E) &&
+ (reg_01.bits.entries != 0x3F)
+ )
+ UNEXPECTED_IO_APIC();
+
+ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
+ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
+ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
+ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
+ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
+ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
+ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
+ )
+ UNEXPECTED_IO_APIC();
+ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
+ UNEXPECTED_IO_APIC();
+
+ /*
+		 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
+		 * and a read of it returns the previously read register's value, so
+		 * ignore it if reg_02 == reg_01.
+ */
+ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
+ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
+ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
+ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
+ UNEXPECTED_IO_APIC();
+ }
+
+ /*
+		 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
+		 * or reg_03, and a read of them returns the previously read register's
+		 * value, so ignore reg_03 if it equals reg_01 or reg_02.
+ */
+ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
+ reg_03.raw != reg_01.raw) {
+ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
+ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
+ if (reg_03.bits.__reserved_1)
+ UNEXPECTED_IO_APIC();
+ }
+
+ printk(KERN_DEBUG ".... IRQ redirection table:\n");
+
+ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
+ " Stat Dest Deli Vect: \n");
+
+ for (i = 0; i <= reg_01.bits.entries; i++) {
+ struct IO_APIC_route_entry entry;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
+ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ printk(KERN_DEBUG " %02x %03X %02X ",
+ i,
+ entry.dest.logical.logical_dest,
+ entry.dest.physical.physical_dest
+ );
+
+ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
+ entry.mask,
+ entry.trigger,
+ entry.irr,
+ entry.polarity,
+ entry.delivery_status,
+ entry.dest_mode,
+ entry.delivery_mode,
+ entry.vector
+ );
+ }
+ }
+ if (use_pci_vector())
+ printk(KERN_INFO "Using vector-based indexing\n");
+ printk(KERN_DEBUG "IRQ to pin mappings:\n");
+ for (i = 0; i < NR_IRQS; i++) {
+ struct irq_pin_list *entry = irq_2_pin + i;
+ if (entry->pin < 0)
+ continue;
+ if (use_pci_vector() && !platform_legacy_irq(i))
+ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
+ else
+ printk(KERN_DEBUG "IRQ%d ", i);
+ for (;;) {
+ printk("-> %d:%d", entry->apic, entry->pin);
+ if (!entry->next)
+ break;
+ entry = irq_2_pin + entry->next;
+ }
+ printk("\n");
+ }
+
+ printk(KERN_INFO ".................................... done.\n");
+
+ return;
+}
+
+static void print_APIC_bitfield (int base)
+{
+ unsigned int v;
+ int i, j;
+
+ if (apic_verbosity == APIC_QUIET)
+ return;
+
+ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
+ for (i = 0; i < 8; i++) {
+ v = apic_read(base + i*0x10);
+ for (j = 0; j < 32; j++) {
+ if (v & (1<<j))
+ printk("1");
+ else
+ printk("0");
+ }
+ printk("\n");
+ }
+}
+
+void /*__init*/ print_local_APIC(void * dummy)
+{
+ unsigned int v, ver, maxlvt;
+
+ if (apic_verbosity == APIC_QUIET)
+ return;
+
+ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
+ smp_processor_id(), hard_smp_processor_id());
+ v = apic_read(APIC_ID);
+ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
+ v = apic_read(APIC_LVR);
+ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
+ ver = GET_APIC_VERSION(v);
+ maxlvt = get_maxlvt();
+
+ v = apic_read(APIC_TASKPRI);
+ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
+
+ if (APIC_INTEGRATED(ver)) { /* !82489DX */
+ v = apic_read(APIC_ARBPRI);
+ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
+ v & APIC_ARBPRI_MASK);
+ v = apic_read(APIC_PROCPRI);
+ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
+ }
+
+ v = apic_read(APIC_EOI);
+ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
+ v = apic_read(APIC_RRR);
+ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
+ v = apic_read(APIC_LDR);
+ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
+ v = apic_read(APIC_DFR);
+ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
+ v = apic_read(APIC_SPIV);
+ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
+
+ printk(KERN_DEBUG "... APIC ISR field:\n");
+ print_APIC_bitfield(APIC_ISR);
+ printk(KERN_DEBUG "... APIC TMR field:\n");
+ print_APIC_bitfield(APIC_TMR);
+ printk(KERN_DEBUG "... APIC IRR field:\n");
+ print_APIC_bitfield(APIC_IRR);
+
+ if (APIC_INTEGRATED(ver)) { /* !82489DX */
+ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
+ apic_write(APIC_ESR, 0);
+ v = apic_read(APIC_ESR);
+ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
+ }
+
+ v = apic_read(APIC_ICR);
+ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
+ v = apic_read(APIC_ICR2);
+ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
+
+ v = apic_read(APIC_LVTT);
+ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
+
+ if (maxlvt > 3) { /* PC is LVT#4. */
+ v = apic_read(APIC_LVTPC);
+ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
+ }
+ v = apic_read(APIC_LVT0);
+ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
+ v = apic_read(APIC_LVT1);
+ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
+
+ if (maxlvt > 2) { /* ERR is LVT#3. */
+ v = apic_read(APIC_LVTERR);
+ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
+ }
+
+ v = apic_read(APIC_TMICT);
+ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
+ v = apic_read(APIC_TMCCT);
+ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
+ v = apic_read(APIC_TDCR);
+ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
+ printk("\n");
+}
+
+void print_all_local_APICs (void)
+{
+ on_each_cpu(print_local_APIC, NULL, 1, 1);
+}
+
+void /*__init*/ print_PIC(void)
+{
+ extern spinlock_t i8259A_lock;
+ unsigned int v;
+ unsigned long flags;
+
+ if (apic_verbosity == APIC_QUIET)
+ return;
+
+ printk(KERN_DEBUG "\nprinting PIC contents\n");
+
+ spin_lock_irqsave(&i8259A_lock, flags);
+
+ v = inb(0xa1) << 8 | inb(0x21);
+ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
+
+ v = inb(0xa0) << 8 | inb(0x20);
+ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
+
+ outb(0x0b,0xa0);
+ outb(0x0b,0x20);
+ v = inb(0xa0) << 8 | inb(0x20);
+ outb(0x0a,0xa0);
+ outb(0x0a,0x20);
+
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+
+ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
+
+ v = inb(0x4d1) << 8 | inb(0x4d0);
+ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
+}
+#else
+void __init print_IO_APIC(void) { }
+#endif /* !CONFIG_XEN */
+
+static void __init enable_IO_APIC(void)
+{
+ union IO_APIC_reg_01 reg_01;
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < PIN_MAP_SIZE; i++) {
+ irq_2_pin[i].pin = -1;
+ irq_2_pin[i].next = 0;
+ }
+ if (!pirqs_enabled)
+ for (i = 0; i < MAX_PIRQS; i++)
+ pirq_entries[i] = -1;
+
+ /*
+ * The number of IO-APIC IRQ registers (== #pins):
+ */
+ for (i = 0; i < nr_ioapics; i++) {
+ spin_lock_irqsave(&ioapic_lock, flags);
+ reg_01.raw = io_apic_read(i, 1);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ nr_ioapic_registers[i] = reg_01.bits.entries+1;
+ }
+
+ /*
+ * Do not trust the IO-APIC being empty at bootup
+ */
+ clear_IO_APIC();
+}
+
+/*
+ * Not an __init, needed by the reboot code
+ */
+void disable_IO_APIC(void)
+{
+ /*
+ * Clear the IO-APIC before rebooting:
+ */
+ clear_IO_APIC();
+
+#ifndef CONFIG_XEN
+ disconnect_bsp_APIC();
+#endif
+}
+
+/*
+ * function to set the IO-APIC physical IDs based on the
+ * values stored in the MPC table.
+ *
+ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
+ */
+
+#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
+static void __init setup_ioapic_ids_from_mpc(void)
+{
+ union IO_APIC_reg_00 reg_00;
+ physid_mask_t phys_id_present_map;
+ int apic;
+ int i;
+ unsigned char old_id;
+ unsigned long flags;
+
+ /*
+ * This is broken; anything with a real cpu count has to
+ * circumvent this idiocy regardless.
+ */
+ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
+
+ /*
+ * Set the IOAPIC ID to the value stored in the MPC table.
+ */
+ for (apic = 0; apic < nr_ioapics; apic++) {
+
+ /* Read the register 0 value */
+ spin_lock_irqsave(&ioapic_lock, flags);
+ reg_00.raw = io_apic_read(apic, 0);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ old_id = mp_ioapics[apic].mpc_apicid;
+
+ if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
+ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
+ apic, mp_ioapics[apic].mpc_apicid);
+ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
+ reg_00.bits.ID);
+ mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
+ }
+
+ /* Don't check I/O APIC IDs for some xAPIC systems. They have
+ * no meaning without the serial APIC bus. */
+ if (NO_IOAPIC_CHECK)
+ continue;
+ /*
+ * Sanity check, is the ID really free? Every APIC in a
+ * system must have a unique ID or we get lots of nice
+ * 'stuck on smp_invalidate_needed IPI wait' messages.
+ */
+ if (check_apicid_used(phys_id_present_map,
+ mp_ioapics[apic].mpc_apicid)) {
+ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
+ apic, mp_ioapics[apic].mpc_apicid);
+ for (i = 0; i < get_physical_broadcast(); i++)
+ if (!physid_isset(i, phys_id_present_map))
+ break;
+ if (i >= get_physical_broadcast())
+ panic("Max APIC ID exceeded!\n");
+ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
+ i);
+ physid_set(i, phys_id_present_map);
+ mp_ioapics[apic].mpc_apicid = i;
+ } else {
+ physid_mask_t tmp;
+ tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
+ apic_printk(APIC_VERBOSE, "Setting %d in the "
+ "phys_id_present_map\n",
+ mp_ioapics[apic].mpc_apicid);
+ physids_or(phys_id_present_map, phys_id_present_map, tmp);
+ }
+
+
+ /*
+ * We need to adjust the IRQ routing table
+ * if the ID changed.
+ */
+ if (old_id != mp_ioapics[apic].mpc_apicid)
+ for (i = 0; i < mp_irq_entries; i++)
+ if (mp_irqs[i].mpc_dstapic == old_id)
+ mp_irqs[i].mpc_dstapic
+ = mp_ioapics[apic].mpc_apicid;
+
+ /*
+ * Read the right value from the MPC table and
+ * write it into the ID register.
+ */
+ apic_printk(APIC_VERBOSE, KERN_INFO
+ "...changing IO-APIC physical APIC ID to %d ...",
+ mp_ioapics[apic].mpc_apicid);
+
+ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
+ spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(apic, 0, reg_00.raw);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ /*
+ * Sanity check
+ */
+ spin_lock_irqsave(&ioapic_lock, flags);
+ reg_00.raw = io_apic_read(apic, 0);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
+ printk("could not set ID!\n");
+ else
+ apic_printk(APIC_VERBOSE, " ok.\n");
+ }
+}
+#else
+static void __init setup_ioapic_ids_from_mpc(void) { }
+#endif
+
+#ifndef CONFIG_XEN
+/*
+ * There is a nasty bug in some older SMP boards, their mptable lies
+ * about the timer IRQ. We do the following to work around the situation:
+ *
+ * - timer IRQ defaults to IO-APIC IRQ
+ * - if this function detects that timer IRQs are defunct, then we fall
+ * back to ISA timer IRQs
+ */
+static int __init timer_irq_works(void)
+{
+ unsigned long t1 = jiffies;
+
+ local_irq_enable();
+ /* Let ten ticks pass... */
+ mdelay((10 * 1000) / HZ);
+
+ /*
+ * Expect a few ticks at least, to be sure some possible
+ * glue logic does not lock up after one or two first
+ * ticks in a non-ExtINT mode. Also the local APIC
+ * might have cached one ExtINT interrupt. Finally, at
+ * least one tick may be lost due to delays.
+ */
+ if (jiffies - t1 > 4)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * In the SMP+IOAPIC case it might happen that there are an unspecified
+ * number of pending IRQ events unhandled. These cases are very rare,
+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
+ * better to do it this way as thus we do not have to be aware of
+ * 'pending' interrupts in the IRQ path, except at this point.
+ */
+/*
+ * Edge triggered needs to resend any interrupt
+ * that was delayed but this is now handled in the device
+ * independent code.
+ */
+
+/*
+ * Starting up an edge-triggered IO-APIC interrupt is
+ * nasty - we need to make sure that we get the edge.
+ * If it is already asserted for some reason, we need to
+ * return 1 to indicate that it was pending.
+ *
+ * This is not complete - we should be able to fake
+ * an edge even if it isn't on the 8259A...
+ */
+static unsigned int startup_edge_ioapic_irq(unsigned int irq)
+{
+ int was_pending = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ if (irq < 16) {
+ disable_8259A_irq(irq);
+ if (i8259A_irq_pending(irq))
+ was_pending = 1;
+ }
+ __unmask_IO_APIC_irq(irq);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ return was_pending;
+}
+
+/*
+ * Once we have recorded IRQ_PENDING already, we can mask the
+ * interrupt for real. This prevents IRQ storms from unhandled
+ * devices.
+ */
+static void ack_edge_ioapic_irq(unsigned int irq)
+{
+ move_irq(irq);
+ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
+ == (IRQ_PENDING | IRQ_DISABLED))
+ mask_IO_APIC_irq(irq);
+ ack_APIC_irq();
+}
+
+/*
+ * Level triggered interrupts can just be masked,
+ * and shutting down and starting up the interrupt
+ * is the same as enabling and disabling them -- except
+ * with a startup need to return a "was pending" value.
+ *
+ * Level triggered interrupts are special because we
+ * do not touch any IO-APIC register while handling
+ * them. We ack the APIC in the end-IRQ handler, not
+ * in the start-IRQ-handler. Protection against reentrance
+ * from the same interrupt is still provided, both by the
+ * generic IRQ layer and by the fact that an unacked local
+ * APIC does not accept IRQs.
+ */
+static unsigned int startup_level_ioapic_irq (unsigned int irq)
+{
+ unmask_IO_APIC_irq(irq);
+
+ return 0; /* don't check for pending */
+}
+
+static void end_level_ioapic_irq (unsigned int irq)
+{
+ unsigned long v;
+ int i;
+
+ move_irq(irq);
+/*
+ * It appears there is an erratum which affects at least version 0x11
+ * of I/O APIC (that's the 82093AA and cores integrated into various
+ * chipsets). Under certain conditions a level-triggered interrupt is
+ * erroneously delivered as edge-triggered one but the respective IRR
+ * bit gets set nevertheless. As a result the I/O unit expects an EOI
+ * message but it will never arrive and further interrupts are blocked
+ * from the source. The exact reason is so far unknown, but the
+ * phenomenon was observed when two consecutive interrupt requests
+ * from a given source get delivered to the same CPU and the source is
+ * temporarily disabled in between.
+ *
+ * A workaround is to simulate an EOI message manually. We achieve it
+ * by setting the trigger mode to edge and then to level when the edge
+ * trigger mode gets detected in the TMR of a local APIC for a
+ * level-triggered interrupt. We mask the source for the time of the
+ * operation to prevent an edge-triggered interrupt escaping meanwhile.
+ * The idea is from Manfred Spraul. --macro
+ */
+ i = IO_APIC_VECTOR(irq);
+
+ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
+
+ ack_APIC_irq();
+
+ if (!(v & (1 << (i & 0x1f)))) {
+ atomic_inc(&irq_mis_count);
+ spin_lock(&ioapic_lock);
+ __mask_and_edge_IO_APIC_irq(irq);
+ __unmask_and_level_IO_APIC_irq(irq);
+ spin_unlock(&ioapic_lock);
+ }
+}
+
+#ifdef CONFIG_PCI_MSI
+static unsigned int startup_edge_ioapic_vector(unsigned int vector)
+{
+ int irq = vector_to_irq(vector);
+
+ return startup_edge_ioapic_irq(irq);
+}
+
+static void ack_edge_ioapic_vector(unsigned int vector)
+{
+ int irq = vector_to_irq(vector);
+
+ ack_edge_ioapic_irq(irq);
+}
+
+static unsigned int startup_level_ioapic_vector (unsigned int vector)
+{
+ int irq = vector_to_irq(vector);
+
+ return startup_level_ioapic_irq (irq);
+}
+
+static void end_level_ioapic_vector (unsigned int vector)
+{
+ int irq = vector_to_irq(vector);
+
+ end_level_ioapic_irq(irq);
+}
+
+static void mask_IO_APIC_vector (unsigned int vector)
+{
+ int irq = vector_to_irq(vector);
+
+ mask_IO_APIC_irq(irq);
+}
+
+static void unmask_IO_APIC_vector (unsigned int vector)
+{
+ int irq = vector_to_irq(vector);
+
+ unmask_IO_APIC_irq(irq);
+}
+
+static void set_ioapic_affinity_vector (unsigned int vector,
+ cpumask_t cpu_mask)
+{
+ int irq = vector_to_irq(vector);
+
+ set_ioapic_affinity_irq(irq, cpu_mask);
+}
+#endif
+
+/*
+ * Level and edge triggered IO-APIC interrupts need different handling,
+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
+ * handled with the level-triggered descriptor, but that one has slightly
+ * more overhead. Level-triggered interrupts cannot be handled with the
+ * edge-triggered handler, without risking IRQ storms and other ugly
+ * races.
+ */
+static struct hw_interrupt_type ioapic_edge_type = {
+ .typename = "IO-APIC-edge",
+ .startup = startup_edge_ioapic,
+ .shutdown = shutdown_edge_ioapic,
+ .enable = enable_edge_ioapic,
+ .disable = disable_edge_ioapic,
+ .ack = ack_edge_ioapic,
+ .end = end_edge_ioapic,
+ .set_affinity = set_ioapic_affinity,
+};
+
+static struct hw_interrupt_type ioapic_level_type = {
+ .typename = "IO-APIC-level",
+ .startup = startup_level_ioapic,
+ .shutdown = shutdown_level_ioapic,
+ .enable = enable_level_ioapic,
+ .disable = disable_level_ioapic,
+ .ack = mask_and_ack_level_ioapic,
+ .end = end_level_ioapic,
+ .set_affinity = set_ioapic_affinity,
+};
+#endif /* !CONFIG_XEN */
+
+static inline void init_IO_APIC_traps(void)
+{
+ int irq;
+
+ /*
+ * NOTE! The local APIC isn't very good at handling
+ * multiple interrupts at the same interrupt level.
+ * As the interrupt level is determined by taking the
+ * vector number and shifting that right by 4, we
+ * want to spread these out a bit so that they don't
+ * all fall in the same interrupt level.
+ *
+ * Also, we've got to be careful not to trash gate
+ * 0x80, because int 0x80 is hm, kind of importantish. ;)
+ */
+ for (irq = 0; irq < NR_IRQS ; irq++) {
+ int tmp = irq;
+ if (use_pci_vector()) {
+ if (!platform_legacy_irq(tmp))
+ if ((tmp = vector_to_irq(tmp)) == -1)
+ continue;
+ }
+ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
+ /*
+ * Hmm.. We don't have an entry for this,
+ * so default to an old-fashioned 8259
+ * interrupt if we can..
+ */
+ if (irq < 16)
+ make_8259A_irq(irq);
+#ifndef CONFIG_XEN
+ else
+ /* Strange. Oh, well.. */
+ irq_desc[irq].handler = &no_irq_type;
+#endif
+ }
+ }
+}
+
+#ifndef CONFIG_XEN
+static void enable_lapic_irq (unsigned int irq)
+{
+ unsigned long v;
+
+ v = apic_read(APIC_LVT0);
+ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
+}
+
+static void disable_lapic_irq (unsigned int irq)
+{
+ unsigned long v;
+
+ v = apic_read(APIC_LVT0);
+ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
+}
+
+static void ack_lapic_irq (unsigned int irq)
+{
+ ack_APIC_irq();
+}
+
+static void end_lapic_irq (unsigned int i) { /* nothing */ }
+
+static struct hw_interrupt_type lapic_irq_type = {
+ .typename = "local-APIC-edge",
+ .startup = NULL, /* startup_irq() not used for IRQ0 */
+ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
+ .enable = enable_lapic_irq,
+ .disable = disable_lapic_irq,
+ .ack = ack_lapic_irq,
+ .end = end_lapic_irq
+};
+
+static void setup_nmi (void)
+{
+ /*
+ * Dirty trick to enable the NMI watchdog ...
+ * We put the 8259A master into AEOI mode and
+ * unmask on all local APICs LVT0 as NMI.
+ *
+ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
+ * is from Maciej W. Rozycki - so we do not have to EOI from
+ * the NMI handler or the timer interrupt.
+ */
+ apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
+
+ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
+
+ apic_printk(APIC_VERBOSE, " done.\n");
+}
+
+/*
+ * This looks a bit hackish but it's about the only way of sending
+ * a few INTA cycles to 8259As and any associated glue logic. ICR does
+ * not support the ExtINT mode, unfortunately. We need to send these
+ * cycles as some i82489DX-based boards have glue logic that keeps the
+ * 8259A interrupt line asserted until INTA. --macro
+ */
+static inline void unlock_ExtINT_logic(void)
+{
+ int pin, i;
+ struct IO_APIC_route_entry entry0, entry1;
+ unsigned char save_control, save_freq_select;
+ unsigned long flags;
+
+ pin = find_isa_irq_pin(8, mp_INT);
+ if (pin == -1)
+ return;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ *(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin);
+ *(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ clear_IO_APIC_pin(0, pin);
+
+ memset(&entry1, 0, sizeof(entry1));
+
+ entry1.dest_mode = 0; /* physical delivery */
+ entry1.mask = 0; /* unmask IRQ now */
+ entry1.dest.physical.physical_dest = hard_smp_processor_id();
+ entry1.delivery_mode = dest_ExtINT;
+ entry1.polarity = entry0.polarity;
+ entry1.trigger = 0;
+ entry1.vector = 0;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
+ io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ save_control = CMOS_READ(RTC_CONTROL);
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
+ RTC_FREQ_SELECT);
+ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
+
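+	/*
+	 * Wait up to about a second for RTC periodic interrupts to come
+	 * through; each one observed (RTC_PF set) shortens the wait.
+	 */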
+ i = 100;
+ while (i-- > 0) {
+ mdelay(10);
+ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
+ i -= 10;
+ }
+
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+ clear_IO_APIC_pin(0, pin);
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
+ io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+/*
+ * This code may look a bit paranoid, but it's supposed to cooperate with
+ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
+ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
+ * fanatically on his truly buggy board.
+ */
+static inline void check_timer(void)
+{
+ int pin1, pin2;
+ int vector;
+
+ /*
+ * get/set the timer IRQ vector:
+ */
+ disable_8259A_irq(0);
+ vector = assign_irq_vector(0);
+ set_intr_gate(vector, interrupt[0]);
+
+ /*
+ * Subtle, code in do_timer_interrupt() expects an AEOI
+ * mode for the 8259A whenever interrupts are routed
+ * through I/O APICs. Also IRQ0 has to be enabled in
+ * the 8259A which implies the virtual wire has to be
+ * disabled in the local APIC.
+ */
+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
+ init_8259A(1);
+ timer_ack = 1;
+ enable_8259A_irq(0);
+
+ pin1 = find_isa_irq_pin(0, mp_INT);
+ pin2 = find_isa_irq_pin(0, mp_ExtINT);
+
+ printk(KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2);
+
+ if (pin1 != -1) {
+ /*
+ * Ok, does IRQ0 through the IOAPIC work?
+ */
+ unmask_IO_APIC_irq(0);
+ if (timer_irq_works()) {
+ if (nmi_watchdog == NMI_IO_APIC) {
+ disable_8259A_irq(0);
+ setup_nmi();
+ enable_8259A_irq(0);
+ check_nmi_watchdog();
+ }
+ return;
+ }
+ clear_IO_APIC_pin(0, pin1);
+ printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
+ }
+
+ printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
+ if (pin2 != -1) {
+ printk("\n..... (found pin %d) ...", pin2);
+ /*
+ * legacy devices should be connected to IO APIC #0
+ */
+ setup_ExtINT_IRQ0_pin(pin2, vector);
+ if (timer_irq_works()) {
+ printk("works.\n");
+ if (pin1 != -1)
+ replace_pin_at_irq(0, 0, pin1, 0, pin2);
+ else
+ add_pin_to_irq(0, 0, pin2);
+ if (nmi_watchdog == NMI_IO_APIC) {
+ setup_nmi();
+ check_nmi_watchdog();
+ }
+ return;
+ }
+ /*
+ * Cleanup, just in case ...
+ */
+ clear_IO_APIC_pin(0, pin2);
+ }
+ printk(" failed.\n");
+
+ if (nmi_watchdog == NMI_IO_APIC) {
+ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
+ nmi_watchdog = 0;
+ }
+
+ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
+
+ disable_8259A_irq(0);
+ irq_desc[0].handler = &lapic_irq_type;
+ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
+ enable_8259A_irq(0);
+
+ if (timer_irq_works()) {
+ printk(" works.\n");
+ return;
+ }
+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
+ printk(" failed.\n");
+
+ printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
+
+ timer_ack = 0;
+ init_8259A(0);
+ make_8259A_irq(0);
+ apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
+
+ unlock_ExtINT_logic();
+
+ if (timer_irq_works()) {
+ printk(" works.\n");
+ return;
+ }
+ printk(" failed :(.\n");
+ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
+ "report. Then try booting with the 'noapic' option");
+}
+#else
+#define check_timer() ((void)0)
+#endif
+
+/*
+ *
+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
+ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
+ * Linux doesn't really care, as it's not actually used
+ * for any interrupt handling anyway.
+ */
+#define PIC_IRQS (1 << PIC_CASCADE_IR)
+
+void __init setup_IO_APIC(void)
+{
+ enable_IO_APIC();
+
+ if (acpi_ioapic)
+ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
+ else
+ io_apic_irqs = ~PIC_IRQS;
+
+ printk("ENABLING IO-APIC IRQs\n");
+
+ /*
+ * Set up IO-APIC IRQ routing.
+ */
+ if (!acpi_ioapic)
+ setup_ioapic_ids_from_mpc();
+#ifndef CONFIG_XEN
+ sync_Arb_IDs();
+#endif
+ setup_IO_APIC_irqs();
+ init_IO_APIC_traps();
+ check_timer();
+ if (!acpi_ioapic)
+ print_IO_APIC();
+}
+
+/*
+ * Called after all the initialization is done. If we didn't find any
+ * APIC bugs then we can allow the modify fast path.
+ */
+
+static int __init io_apic_bug_finalize(void)
+{
+	if (sis_apic_bug == -1)
+		sis_apic_bug = 0;
+ return 0;
+}
+
+late_initcall(io_apic_bug_finalize);
+
+struct sysfs_ioapic_data {
+ struct sys_device dev;
+ struct IO_APIC_route_entry entry[0];
+};
+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
+
+static int ioapic_suspend(struct sys_device *dev, u32 state)
+{
+ struct IO_APIC_route_entry *entry;
+ struct sysfs_ioapic_data *data;
+ unsigned long flags;
+ int i;
+
+ data = container_of(dev, struct sysfs_ioapic_data, dev);
+ entry = data->entry;
+ spin_lock_irqsave(&ioapic_lock, flags);
+	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++) {
+ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
+ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
+ }
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ return 0;
+}
+
+static int ioapic_resume(struct sys_device *dev)
+{
+ struct IO_APIC_route_entry *entry;
+ struct sysfs_ioapic_data *data;
+ unsigned long flags;
+ union IO_APIC_reg_00 reg_00;
+ int i;
+
+ data = container_of(dev, struct sysfs_ioapic_data, dev);
+ entry = data->entry;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ reg_00.raw = io_apic_read(dev->id, 0);
+ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
+ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
+ io_apic_write(dev->id, 0, reg_00.raw);
+ }
+	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++) {
+ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
+ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
+ }
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ return 0;
+}
+
+static struct sysdev_class ioapic_sysdev_class = {
+ set_kset_name("ioapic"),
+ .suspend = ioapic_suspend,
+ .resume = ioapic_resume,
+};
+
+static int __init ioapic_init_sysfs(void)
+{
+ struct sys_device * dev;
+ int i, size, error = 0;
+
+ error = sysdev_class_register(&ioapic_sysdev_class);
+ if (error)
+ return error;
+
+ for (i = 0; i < nr_ioapics; i++ ) {
+ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
+ * sizeof(struct IO_APIC_route_entry);
+ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
+ if (!mp_ioapic_data[i]) {
+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
+ continue;
+ }
+ memset(mp_ioapic_data[i], 0, size);
+ dev = &mp_ioapic_data[i]->dev;
+ dev->id = i;
+ dev->cls = &ioapic_sysdev_class;
+ error = sysdev_register(dev);
+ if (error) {
+ kfree(mp_ioapic_data[i]);
+ mp_ioapic_data[i] = NULL;
+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+device_initcall(ioapic_init_sysfs);
+
+/* --------------------------------------------------------------------------
+ ACPI-based IOAPIC Configuration
+ -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_BOOT
+
+int __init io_apic_get_unique_id (int ioapic, int apic_id)
+{
+#ifndef CONFIG_XEN
+ union IO_APIC_reg_00 reg_00;
+ static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
+ physid_mask_t tmp;
+ unsigned long flags;
+ int i = 0;
+
+ /*
+ * The P4 platform supports up to 256 APIC IDs on two separate APIC
+	 * buses (one for LAPICs, one for IOAPICs), whereas its predecessors
+	 * only support up to 16 on one shared APIC bus.
+ *
+ * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
+ * advantage of new APIC bus architecture.
+ */
+
+ if (physids_empty(apic_id_map))
+ apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ reg_00.raw = io_apic_read(ioapic, 0);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ if (apic_id >= get_physical_broadcast()) {
+ printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
+ "%d\n", ioapic, apic_id, reg_00.bits.ID);
+ apic_id = reg_00.bits.ID;
+ }
+
+ /*
+ * Every APIC in a system must have a unique ID or we get lots of nice
+ * 'stuck on smp_invalidate_needed IPI wait' messages.
+ */
+ if (check_apicid_used(apic_id_map, apic_id)) {
+
+ for (i = 0; i < get_physical_broadcast(); i++) {
+ if (!check_apicid_used(apic_id_map, i))
+ break;
+ }
+
+ if (i == get_physical_broadcast())
+ panic("Max apic_id exceeded!\n");
+
+ printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
+ "trying %d\n", ioapic, apic_id, i);
+
+ apic_id = i;
+ }
+
+ tmp = apicid_to_cpu_present(apic_id);
+ physids_or(apic_id_map, apic_id_map, tmp);
+
+ if (reg_00.bits.ID != apic_id) {
+ reg_00.bits.ID = apic_id;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(ioapic, 0, reg_00.raw);
+ reg_00.raw = io_apic_read(ioapic, 0);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ /* Sanity check */
+ if (reg_00.bits.ID != apic_id)
+			panic("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
+ }
+
+ apic_printk(APIC_VERBOSE, KERN_INFO
+ "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
+#endif /* !CONFIG_XEN */
+
+ return apic_id;
+}
+
+
+int __init io_apic_get_version (int ioapic)
+{
+ union IO_APIC_reg_01 reg_01;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ reg_01.raw = io_apic_read(ioapic, 1);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ return reg_01.bits.version;
+}
+
+
+int __init io_apic_get_redir_entries (int ioapic)
+{
+ union IO_APIC_reg_01 reg_01;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ reg_01.raw = io_apic_read(ioapic, 1);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ return reg_01.bits.entries;
+}
+
+
+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
+{
+ struct IO_APIC_route_entry entry;
+ unsigned long flags;
+
+ if (!IO_APIC_IRQ(irq)) {
+ printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
+ ioapic);
+ return -EINVAL;
+ }
+
+ /*
+ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
+ * Note that we mask (disable) IRQs now -- these get enabled when the
+ * corresponding device driver registers for this IRQ.
+ */
+
+ memset(&entry,0,sizeof(entry));
+
+ entry.delivery_mode = INT_DELIVERY_MODE;
+ entry.dest_mode = INT_DEST_MODE;
+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+ entry.trigger = edge_level;
+ entry.polarity = active_high_low;
+ entry.mask = 1;
+
+ /*
+ * IRQs < 16 are already in the irq_2_pin[] map
+ */
+ if (irq >= 16)
+ add_pin_to_irq(irq, ioapic, pin);
+
+ entry.vector = assign_irq_vector(irq);
+
+ apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
+ "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
+ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
+ edge_level, active_high_low);
+
+ ioapic_register_intr(irq, entry.vector, edge_level);
+
+ if (!ioapic && (irq < 16))
+ disable_8259A_irq(irq);
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
+ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ return 0;
+}
+
+#endif /*CONFIG_ACPI_BOOT*/
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ioport.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ioport.c
index 89c1c7e38f..3aa6c5a4cf 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ioport.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ioport.c
@@ -1,19 +1,110 @@
+/*
+ * linux/arch/i386/kernel/ioport.c
+ *
+ * This contains the io-permission bitmap code - written by obz, with changes
+ * by Linus.
+ */
+
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
-#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/slab.h>
-#include <asm-xen/xen-public/dom0_ops.h>
+#include <linux/thread_info.h>
+#include <asm-xen/xen-public/physdev.h>
+
+/* Set EXTENT bits starting at BASE in BITMAP to value NEW_VALUE. */
+static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
+{
+ unsigned long mask;
+ unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
+ unsigned int low_index = base & (BITS_PER_LONG-1);
+ int length = low_index + extent;
+
+ if (low_index != 0) {
+ mask = (~0UL << low_index);
+ if (length < BITS_PER_LONG)
+ mask &= ~(~0UL << length);
+ if (new_value)
+ *bitmap_base++ |= mask;
+ else
+ *bitmap_base++ &= ~mask;
+ length -= BITS_PER_LONG;
+ }
+
+ mask = (new_value ? ~0UL : 0UL);
+ while (length >= BITS_PER_LONG) {
+ *bitmap_base++ = mask;
+ length -= BITS_PER_LONG;
+ }
+
+ if (length > 0) {
+ mask = ~(~0UL << length);
+ if (new_value)
+ *bitmap_base++ |= mask;
+ else
+ *bitmap_base++ &= ~mask;
+ }
+}
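
set_bitmap() works a word at a time: a partial leading word, a run of full words, then a partial trailing word. The logic can be sanity-checked verbatim in user space; this standalone sketch reuses the same body with BITS_PER_LONG derived from <limits.h>. Note the inversion at the call site below (set_bitmap(..., !turn_on)): in the I/O bitmap a cleared bit means access is permitted.

    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

    /* Same algorithm as the kernel's set_bitmap() above. */
    static void set_bitmap(unsigned long *bitmap, unsigned int base,
                           unsigned int extent, int new_value)
    {
        unsigned long mask;
        unsigned long *p = bitmap + (base / BITS_PER_LONG);
        unsigned int low = base & (BITS_PER_LONG - 1);
        int length = low + extent;

        if (low != 0) {
            mask = ~0UL << low;
            if (length < (int)BITS_PER_LONG)
                mask &= ~(~0UL << length);
            if (new_value) *p++ |= mask; else *p++ &= ~mask;
            length -= BITS_PER_LONG;
        }
        mask = new_value ? ~0UL : 0UL;
        while (length >= (int)BITS_PER_LONG) {
            *p++ = mask;
            length -= BITS_PER_LONG;
        }
        if (length > 0) {
            mask = ~(~0UL << length);
            if (new_value) *p |= mask; else *p &= ~mask;
        }
    }

    int main(void)
    {
        unsigned long bm[4] = { 0 };

        set_bitmap(bm, 60, 16, 1);   /* set bits 60..75: crosses a word */
        for (int i = 0; i < 4; i++)
            printf("word %d: %016lx\n", i, bm[i]);
        return 0;
    }
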
+
+
+/*
+ * This changes the I/O permissions bitmap of the current task.
+ */
+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+{
+ struct thread_struct * t = &current->thread;
+ unsigned long *bitmap;
+ physdev_op_t op;
+
+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+ return -EINVAL;
+ if (turn_on && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ /*
+ * If it's the first ioperm() call in this thread's lifetime, set the
+ * IO bitmap up. ioperm() is much less timing critical than clone(),
+ * this is why we delay this operation until now:
+ */
+ if (!t->io_bitmap_ptr) {
+ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+ if (!bitmap)
+ return -ENOMEM;
+
+ memset(bitmap, 0xff, IO_BITMAP_BYTES);
+ t->io_bitmap_ptr = bitmap;
+
+ op.cmd = PHYSDEVOP_SET_IOBITMAP;
+ op.u.set_iobitmap.bitmap = (unsigned long)bitmap;
+ op.u.set_iobitmap.nr_ports = IO_BITMAP_BITS;
+ HYPERVISOR_physdev_op(&op);
+ }
+
+ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
+
+ return 0;
+}
+
+/*
+ * sys_iopl has to be used when you want to access the IO ports
+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
+ *
+ * Here we just change the eflags value on the stack: we allow
+ * only the super-user to do it. This depends on the stack-layout
+ * on system-call entry - see also fork() and the signal handling
+ * code.
+ */
asmlinkage long sys_iopl(unsigned int new_io_pl)
{
unsigned int old_io_pl = current->thread.io_pl;
- dom0_op_t op;
+ physdev_op_t op;
if (new_io_pl > 3)
return -EINVAL;
@@ -22,9 +113,6 @@ asmlinkage long sys_iopl(unsigned int new_io_pl)
if ((new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO))
return -EPERM;
- if (!(xen_start_info.flags & SIF_PRIVILEGED))
- return -EPERM;
-
/* Maintain OS privileges even if user attempts to relinquish them. */
if (new_io_pl == 0)
new_io_pl = 1;
@@ -33,19 +121,9 @@ asmlinkage long sys_iopl(unsigned int new_io_pl)
current->thread.io_pl = new_io_pl;
/* Force the change at ring 0. */
- op.cmd = DOM0_IOPL;
- op.u.iopl.domain = DOMID_SELF;
- op.u.iopl.iopl = new_io_pl;
- HYPERVISOR_dom0_op(&op);
+ op.cmd = PHYSDEVOP_SET_IOPL;
+ op.u.set_iopl.iopl = new_io_pl;
+ HYPERVISOR_physdev_op(&op);
return 0;
}
-
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-{
-#if 0
- printk(KERN_INFO "ioperm not fully supported - %s\n",
- turn_on ? "set iopl to 3" : "ignore resource release");
-#endif
- return turn_on ? sys_iopl(3) : 0;
-}
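
The user-visible interface is unchanged by the move from dom0_op to physdev_op: a privileged process still calls ioperm() for ports below 0x400 and iopl() for the rest. A minimal (root-only) usage sketch using the glibc wrappers:

    #include <stdio.h>
    #include <sys/io.h>   /* glibc wrappers: ioperm(), iopl(), inb() */

    int main(void)
    {
        /* Grant access to the first serial port's eight registers. */
        if (ioperm(0x3f8, 8, 1) != 0) {
            perror("ioperm");
            return 1;
        }

        unsigned char lsr = inb(0x3f8 + 5);   /* line status register */
        printf("COM1 LSR = 0x%02x\n", lsr);

        /* Ports >= 0x400 need the blunter iopl(3), which opens them all. */
        if (iopl(3) != 0) {
            perror("iopl");
            return 1;
        }
        iopl(0);   /* the Xen kernel clamps the stored IOPL to >= 1, see above */
        return 0;
    }
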
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/irq.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/irq.c
new file mode 100644
index 0000000000..f31697ecb1
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/irq.c
@@ -0,0 +1,297 @@
+/*
+ * linux/arch/i386/kernel/irq.c
+ *
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains the lowest level x86-specific interrupt
+ * entry, irq-stacks and irq statistics code. All the remaining
+ * irq logic is done by the generic kernel/irq/ code and
+ * by the x86-specific irq controller code. (e.g. i8259.c and
+ * io_apic.c.)
+ */
+
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+
+#ifndef CONFIG_X86_LOCAL_APIC
+/*
+ * 'What should we do if we get a hw irq event on an illegal vector?'
+ * Each architecture has to answer this itself.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ printk("unexpected IRQ trap at vector %02x\n", irq);
+}
+#endif
+
+#ifdef CONFIG_4KSTACKS
+/*
+ * per-CPU IRQ handling contexts (thread information and stack)
+ */
+union irq_ctx {
+ struct thread_info tinfo;
+ u32 stack[THREAD_SIZE/sizeof(u32)];
+};
+
+static union irq_ctx *hardirq_ctx[NR_CPUS];
+static union irq_ctx *softirq_ctx[NR_CPUS];
+#endif
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+fastcall unsigned int do_IRQ(struct pt_regs *regs)
+{
+ /* high bits used in ret_from_ code */
+ int irq = regs->orig_eax & __IRQ_MASK(HARDIRQ_BITS);
+#ifdef CONFIG_4KSTACKS
+ union irq_ctx *curctx, *irqctx;
+ u32 *isp;
+#endif
+
+ irq_enter();
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /* Debugging check for stack overflow: is there less than 1KB free? */
+ {
+ long esp;
+
+ __asm__ __volatile__("andl %%esp,%0" :
+ "=r" (esp) : "0" (THREAD_SIZE - 1));
+ if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
+ printk("do_IRQ: stack overflow: %ld\n",
+ esp - sizeof(struct thread_info));
+ dump_stack();
+ }
+ }
+#endif
+
+#ifdef CONFIG_4KSTACKS
+
+ curctx = (union irq_ctx *) current_thread_info();
+ irqctx = hardirq_ctx[smp_processor_id()];
+
+ /*
+ * this is where we switch to the IRQ stack. However, if we are
+ * already using the IRQ stack (because we interrupted a hardirq
+ * handler) we can't do that and just have to keep using the
+ * current stack (which is the irq stack already after all)
+ */
+ if (curctx != irqctx) {
+ int arg1, arg2, ebx;
+
+ /* build the stack frame on the IRQ stack */
+ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
+ irqctx->tinfo.task = curctx->tinfo.task;
+ irqctx->tinfo.previous_esp = current_stack_pointer;
+
+ asm volatile(
+ " xchgl %%ebx,%%esp \n"
+ " call __do_IRQ \n"
+ " movl %%ebx,%%esp \n"
+ : "=a" (arg1), "=d" (arg2), "=b" (ebx)
+ : "0" (irq), "1" (regs), "2" (isp)
+ : "memory", "cc", "ecx"
+ );
+ } else
+#endif
+ __do_IRQ(irq, regs);
+
+ irq_exit();
+
+ return 1;
+}
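
The CONFIG_DEBUG_STACKOVERFLOW block in do_IRQ() relies on kernel stacks being THREAD_SIZE-aligned, so masking %esp with THREAD_SIZE-1 yields the offset into the current stack region; anything below sizeof(struct thread_info) + STACK_WARN means the stack is nearly exhausted. The same arithmetic in plain C, with stand-in values for the kernel constants:

    #include <stdio.h>

    #define THREAD_SIZE       8192u   /* stand-in for the kernel constant */
    #define THREAD_INFO_SIZE  64u     /* stand-in for sizeof(struct thread_info) */
    #define STACK_WARN        (THREAD_SIZE / 8)

    /* thread_info lives at the base of the aligned stack region, so the
     * remaining room is just the stack pointer's offset within it. */
    static void check(unsigned long esp)
    {
        unsigned long offset = esp & (THREAD_SIZE - 1);   /* the andl above */

        if (offset < THREAD_INFO_SIZE + STACK_WARN)
            printf("esp=%#lx: stack overflow warning (%lu bytes left)\n",
                   esp, offset - THREAD_INFO_SIZE);
        else
            printf("esp=%#lx: ok (%lu bytes left)\n",
                   esp, offset - THREAD_INFO_SIZE);
    }

    int main(void)
    {
        check(0xc0402f00UL);   /* plenty of headroom */
        check(0xc0402050UL);   /* 16 bytes above thread_info: warns */
        return 0;
    }
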
+
+#ifdef CONFIG_4KSTACKS
+
+/*
+ * These should really be __section__(".bss.page_aligned") as well, but
+ * gcc's 3.0 and earlier don't handle that correctly.
+ */
+static char softirq_stack[NR_CPUS * THREAD_SIZE]
+ __attribute__((__aligned__(THREAD_SIZE)));
+
+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
+ __attribute__((__aligned__(THREAD_SIZE)));
+
+/*
+ * allocate per-cpu stacks for hardirq and for softirq processing
+ */
+void irq_ctx_init(int cpu)
+{
+ union irq_ctx *irqctx;
+
+ if (hardirq_ctx[cpu])
+ return;
+
+ irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
+ irqctx->tinfo.task = NULL;
+ irqctx->tinfo.exec_domain = NULL;
+ irqctx->tinfo.cpu = cpu;
+ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+
+ hardirq_ctx[cpu] = irqctx;
+
+ irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
+ irqctx->tinfo.task = NULL;
+ irqctx->tinfo.exec_domain = NULL;
+ irqctx->tinfo.cpu = cpu;
+ irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET;
+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+
+ softirq_ctx[cpu] = irqctx;
+
+ printk("CPU %u irqstacks, hard=%p soft=%p\n",
+ cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
+}
+
+extern asmlinkage void __do_softirq(void);
+
+asmlinkage void do_softirq(void)
+{
+ unsigned long flags;
+ struct thread_info *curctx;
+ union irq_ctx *irqctx;
+ u32 *isp;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ if (local_softirq_pending()) {
+ curctx = current_thread_info();
+ irqctx = softirq_ctx[smp_processor_id()];
+ irqctx->tinfo.task = curctx->task;
+ irqctx->tinfo.previous_esp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
+
+ asm volatile(
+ " xchgl %%ebx,%%esp \n"
+ " call __do_softirq \n"
+ " movl %%ebx,%%esp \n"
+ : "=b"(isp)
+ : "0"(isp)
+ : "memory", "cc", "edx", "ecx", "eax"
+ );
+ }
+
+ local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(do_softirq);
+#endif
+
+/*
+ * Interrupt statistics:
+ */
+
+atomic_t irq_err_count;
+
+/*
+ * /proc/interrupts printing:
+ */
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, j;
+ struct irqaction * action;
+ unsigned long flags;
+
+ if (i == 0) {
+ seq_printf(p, " ");
+ for_each_cpu(j)
+ seq_printf(p, "CPU%d ",j);
+ seq_putc(p, '\n');
+ }
+
+ if (i < NR_IRQS) {
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto skip;
+ seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+ for_each_cpu(j)
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#endif
+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %s", action->name);
+
+ for (action=action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+
+ seq_putc(p, '\n');
+skip:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ } else if (i == NR_IRQS) {
+ seq_printf(p, "NMI: ");
+ for_each_cpu(j)
+ seq_printf(p, "%10u ", nmi_count(j));
+ seq_putc(p, '\n');
+#ifdef CONFIG_X86_LOCAL_APIC
+ seq_printf(p, "LOC: ");
+ for_each_cpu(j)
+ seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
+ seq_putc(p, '\n');
+#endif
+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#if defined(CONFIG_X86_IO_APIC)
+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+ }
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void fixup_irqs(cpumask_t map)
+{
+ unsigned int irq;
+ static int warned;
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ cpumask_t mask;
+ if (irq == 2)
+ continue;
+
+ cpus_and(mask, irq_affinity[irq], map);
+ if (any_online_cpu(mask) == NR_CPUS) {
+ printk("Breaking affinity for irq %i\n", irq);
+ mask = map;
+ }
+ if (irq_desc[irq].handler->set_affinity)
+ irq_desc[irq].handler->set_affinity(irq, mask);
+ else if (irq_desc[irq].action && !(warned++))
+ printk("Cannot set affinity for irq %i\n", irq);
+ }
+
+#if 0
+ barrier();
+ /* Ingo Molnar says: "after the IO-APIC masks have been redirected
+ [note the nop - the interrupt-enable boundary on x86 is two
+ instructions from sti] - to flush out pending hardirqs and
+ IPIs. After this point nothing is supposed to reach this CPU." */
+ __asm__ __volatile__("sti; nop; cli");
+ barrier();
+#else
+ /* That doesn't seem sufficient. Give it 1ms. */
+ local_irq_enable();
+ mdelay(1);
+ local_irq_disable();
+#endif
+}
+#endif
+
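fixup_irqs() re-targets every IRQ whose affinity no longer intersects the surviving CPU map, falling back to the whole map when the intersection is empty. The cpumask_t helpers reduce to bitwise logic; a word-sized model of the same decision, with cpus_and() and any_online_cpu() replaced by plain integer operations:

    #include <stdio.h>

    #define NR_CPUS 8

    /* Word-sized stand-in for any_online_cpu(): first set bit, or NR_CPUS. */
    static unsigned int first_set(unsigned int mask)
    {
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
            if (mask & (1u << cpu))
                return cpu;
        return NR_CPUS;   /* none left: triggers the fallback */
    }

    int main(void)
    {
        unsigned int online = 0x0b;   /* CPUs 0, 1, 3 still online */
        unsigned int irq_affinity[] = { 0x02, 0x04, 0xf0 };

        for (int irq = 0; irq < 3; irq++) {
            unsigned int mask = irq_affinity[irq] & online;  /* cpus_and() */
            if (first_set(mask) == NR_CPUS) {
                printf("Breaking affinity for irq %d\n", irq);
                mask = online;        /* fall back to the full map */
            }
            printf("irq %d -> mask %#x\n", irq, mask);
        }
        return 0;
    }
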
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ldt.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ldt.c
index a5a80e4ea7..cee69b73ed 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ldt.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ldt.c
@@ -72,7 +72,6 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
if (oldsize) {
make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
PAGE_SIZE);
- flush_page_update_queue();
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(oldldt);
else
@@ -89,7 +88,6 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
- flush_page_update_queue();
return 0;
}
@@ -102,8 +100,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
struct mm_struct * old_mm;
int retval = 0;
+ memset(&mm->context, 0, sizeof(mm->context));
init_MUTEX(&mm->context.sem);
- mm->context.size = 0;
old_mm = current->mm;
if (old_mm && old_mm->context.size > 0) {
down(&old_mm->context.sem);
@@ -124,7 +122,6 @@ void destroy_context(struct mm_struct *mm)
make_pages_writable(mm->context.ldt,
(mm->context.size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
- flush_page_update_queue();
if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(mm->context.ldt);
else
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/mpparse.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/mpparse.c
new file mode 100644
index 0000000000..16f2ee8c80
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/mpparse.c
@@ -0,0 +1,1115 @@
+/*
+ * Intel Multiprocessor Specification 1.1 and 1.4
+ * compliant MP-table parsing routines.
+ *
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *
+ * Fixes
+ * Erich Boleyn : MP v1.4 and additional changes.
+ * Alan Cox : Added EBDA scanning
+ * Ingo Molnar : various cleanups and rewrites
+ * Maciej W. Rozycki: Bits for default MP configurations
+ * Paul Diefenbaugh: Added full ACPI support
+ */
+
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/bootmem.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/bitops.h>
+
+#include <asm/smp.h>
+#include <asm/acpi.h>
+#include <asm/mtrr.h>
+#include <asm/mpspec.h>
+#include <asm/io_apic.h>
+
+#include <mach_apic.h>
+#include <mach_mpparse.h>
+#include <bios_ebda.h>
+
+/* Have we found an MP table */
+int smp_found_config;
+unsigned int __initdata maxcpus = NR_CPUS;
+
+/*
+ * Various Linux-internal data structures created from the
+ * MP-table.
+ */
+int apic_version [MAX_APICS];
+int mp_bus_id_to_type [MAX_MP_BUSSES];
+int mp_bus_id_to_node [MAX_MP_BUSSES];
+int mp_bus_id_to_local [MAX_MP_BUSSES];
+int quad_local_to_mp_bus_id [NR_CPUS/4][4];
+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
+int mp_current_pci_id;
+
+/* I/O APIC entries */
+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
+
+/* # of MP IRQ source entries */
+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+
+/* MP IRQ source entries */
+int mp_irq_entries;
+
+int nr_ioapics;
+
+int pic_mode;
+unsigned long mp_lapic_addr;
+
+/* Processor that is doing the boot up */
+unsigned int boot_cpu_physical_apicid = -1U;
+unsigned int boot_cpu_logical_apicid = -1U;
+/* Internal processor count */
+static unsigned int __initdata num_processors;
+
+/* Bitmask of physically existing CPUs */
+physid_mask_t phys_cpu_present_map;
+
+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+
+/*
+ * Intel MP BIOS table parsing routines:
+ */
+
+
+/*
+ * Checksum an MP configuration block.
+ */
+
+static int __init mpf_checksum(unsigned char *mp, int len)
+{
+ int sum = 0;
+
+ while (len--)
+ sum += *mp++;
+
+ return sum & 0xFF;
+}
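
mpf_checksum() is a plain byte sum modulo 256: a table is valid when its stored checksum byte brings the total to zero. A small round trip showing how such a byte is chosen and verified (the "_MP_" signature and 16-byte size match the floating pointer structure scanned later in this file):

    #include <stdio.h>
    #include <string.h>

    static int mpf_checksum(const unsigned char *mp, int len)
    {
        int sum = 0;
        while (len--)
            sum += *mp++;
        return sum & 0xFF;
    }

    int main(void)
    {
        unsigned char table[16];

        memcpy(table, "_MP_", 4);       /* floating-pointer signature */
        memset(table + 4, 0xab, 11);    /* arbitrary payload */

        /* Pick the last byte so the whole table sums to 0 mod 256. */
        table[15] = 0;
        table[15] = (unsigned char)(0x100 - mpf_checksum(table, 16));

        printf("checksum byte = 0x%02x, verify = %d (0 means valid)\n",
               table[15], mpf_checksum(table, 16));
        return 0;
    }
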
+
+/*
+ * Translation table entries have to be matched to main table entries by
+ * counter, hence the mpc_record variable; there is no less disgusting way
+ * of doing this.
+ */
+
+static int mpc_record;
+static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
+
+#ifdef CONFIG_X86_NUMAQ
+static int MP_valid_apicid(int apicid, int version)
+{
+ return hweight_long(apicid & 0xf) == 1 && (apicid >> 4) != 0xf;
+}
+#elif !defined(CONFIG_XEN)
+static int MP_valid_apicid(int apicid, int version)
+{
+ if (version >= 0x14)
+ return apicid < 0xff;
+ else
+ return apicid < 0xf;
+}
+#endif
+
+#ifndef CONFIG_XEN
+void __init MP_processor_info (struct mpc_config_processor *m)
+{
+ int ver, apicid;
+ physid_mask_t tmp;
+
+ if (!(m->mpc_cpuflag & CPU_ENABLED))
+ return;
+
+ apicid = mpc_apic_id(m, translation_table[mpc_record]);
+
+ if (m->mpc_featureflag&(1<<0))
+ Dprintk(" Floating point unit present.\n");
+ if (m->mpc_featureflag&(1<<7))
+ Dprintk(" Machine Exception supported.\n");
+ if (m->mpc_featureflag&(1<<8))
+ Dprintk(" 64 bit compare & exchange supported.\n");
+ if (m->mpc_featureflag&(1<<9))
+ Dprintk(" Internal APIC present.\n");
+ if (m->mpc_featureflag&(1<<11))
+ Dprintk(" SEP present.\n");
+ if (m->mpc_featureflag&(1<<12))
+ Dprintk(" MTRR present.\n");
+ if (m->mpc_featureflag&(1<<13))
+ Dprintk(" PGE present.\n");
+ if (m->mpc_featureflag&(1<<14))
+ Dprintk(" MCA present.\n");
+ if (m->mpc_featureflag&(1<<15))
+ Dprintk(" CMOV present.\n");
+ if (m->mpc_featureflag&(1<<16))
+ Dprintk(" PAT present.\n");
+ if (m->mpc_featureflag&(1<<17))
+ Dprintk(" PSE present.\n");
+ if (m->mpc_featureflag&(1<<18))
+ Dprintk(" PSN present.\n");
+ if (m->mpc_featureflag&(1<<19))
+ Dprintk(" Cache Line Flush Instruction present.\n");
+ /* 20 Reserved */
+ if (m->mpc_featureflag&(1<<21))
+ Dprintk(" Debug Trace and EMON Store present.\n");
+ if (m->mpc_featureflag&(1<<22))
+ Dprintk(" ACPI Thermal Throttle Registers present.\n");
+ if (m->mpc_featureflag&(1<<23))
+ Dprintk(" MMX present.\n");
+ if (m->mpc_featureflag&(1<<24))
+ Dprintk(" FXSR present.\n");
+ if (m->mpc_featureflag&(1<<25))
+ Dprintk(" XMM present.\n");
+ if (m->mpc_featureflag&(1<<26))
+ Dprintk(" Willamette New Instructions present.\n");
+ if (m->mpc_featureflag&(1<<27))
+ Dprintk(" Self Snoop present.\n");
+ if (m->mpc_featureflag&(1<<28))
+ Dprintk(" HT present.\n");
+ if (m->mpc_featureflag&(1<<29))
+ Dprintk(" Thermal Monitor present.\n");
+ /* 30, 31 Reserved */
+
+
+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
+ Dprintk(" Bootup CPU\n");
+ boot_cpu_physical_apicid = m->mpc_apicid;
+ boot_cpu_logical_apicid = apicid;
+ }
+
+ if (num_processors >= NR_CPUS) {
+ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
+ " Processor ignored.\n", NR_CPUS);
+ return;
+ }
+
+ if (num_processors >= maxcpus) {
+ printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
+ " Processor ignored.\n", maxcpus);
+ return;
+ }
+ num_processors++;
+ ver = m->mpc_apicver;
+
+ if (!MP_valid_apicid(apicid, ver)) {
+ printk(KERN_WARNING "Processor #%d INVALID. (Max ID: %d).\n",
+ m->mpc_apicid, MAX_APICS);
+ --num_processors;
+ return;
+ }
+
+ tmp = apicid_to_cpu_present(apicid);
+ physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
+
+ /*
+ * Validate version
+ */
+ if (ver == 0x0) {
+ printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
+ ver = 0x10;
+ }
+ apic_version[m->mpc_apicid] = ver;
+ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
+}
+#else
+void __init MP_processor_info (struct mpc_config_processor *m)
+{
+ num_processors++;
+}
+#endif /* CONFIG_XEN */
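
The Dprintk ladder in MP_processor_info() decodes individual bits of mpc_featureflag, which mirrors CPUID leaf 1 EDX. A table-driven version of the same decoding is more compact; the bit numbers below are a subset of those tested above, and the flag value is just an example:

    #include <stdio.h>

    /* Subset of the CPUID.1:EDX feature bits tested above. */
    static const struct { int bit; const char *name; } features[] = {
        { 0, "FPU" }, { 8, "CX8" }, { 15, "CMOV" }, { 23, "MMX" },
        { 24, "FXSR" }, { 25, "XMM" }, { 28, "HT" },
    };

    int main(void)
    {
        unsigned int featureflag = 0x1b80f9ffu;   /* arbitrary example */

        for (unsigned int i = 0;
             i < sizeof(features) / sizeof(features[0]); i++)
            if (featureflag & (1u << features[i].bit))
                printf(" %s present.\n", features[i].name);
        return 0;
    }
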
+
+static void __init MP_bus_info (struct mpc_config_bus *m)
+{
+ char str[7];
+
+ memcpy(str, m->mpc_bustype, 6);
+ str[6] = 0;
+
+ mpc_oem_bus_info(m, str, translation_table[mpc_record]);
+
+ if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
+ } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
+ } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
+ mpc_oem_pci_bus(m, translation_table[mpc_record]);
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
+ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
+ mp_current_pci_id++;
+ } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
+ } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
+ } else {
+ printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
+ }
+}
+
+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
+{
+ if (!(m->mpc_flags & MPC_APIC_USABLE))
+ return;
+
+ printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
+ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
+ if (nr_ioapics >= MAX_IO_APICS) {
+ printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
+ MAX_IO_APICS, nr_ioapics);
+		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
+ }
+ if (!m->mpc_apicaddr) {
+ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
+ " found in MP table, skipping!\n");
+ return;
+ }
+ mp_ioapics[nr_ioapics] = *m;
+ nr_ioapics++;
+}
+
+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
+{
+ mp_irqs [mp_irq_entries] = *m;
+ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
+ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
+ m->mpc_irqtype, m->mpc_irqflag & 3,
+ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
+ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
+ panic("Max # of irq sources exceeded!!\n");
+}
+
+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
+{
+ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
+ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
+ m->mpc_irqtype, m->mpc_irqflag & 3,
+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
+ /*
+ * Well it seems all SMP boards in existence
+ * use ExtINT/LVT1 == LINT0 and
+ * NMI/LVT2 == LINT1 - the following check
+	 * will show us if this assumption is false.
+ * Until then we do not have to add baggage.
+ */
+ if ((m->mpc_irqtype == mp_ExtINT) &&
+ (m->mpc_destapiclint != 0))
+ BUG();
+ if ((m->mpc_irqtype == mp_NMI) &&
+ (m->mpc_destapiclint != 1))
+ BUG();
+}
+
+#ifdef CONFIG_X86_NUMAQ
+static void __init MP_translation_info (struct mpc_config_translation *m)
+{
+ printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
+
+ if (mpc_record >= MAX_MPC_ENTRY)
+ printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
+ else
+ translation_table[mpc_record] = m; /* stash this for later */
+ if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
+ node_set_online(m->trans_quad);
+}
+
+/*
+ * Read/parse the MPC oem tables
+ */
+
+static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
+ unsigned short oemsize)
+{
+ int count = sizeof (*oemtable); /* the header size */
+ unsigned char *oemptr = ((unsigned char *)oemtable)+count;
+
+ mpc_record = 0;
+ printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
+ if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
+ {
+ printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
+ oemtable->oem_signature[0],
+ oemtable->oem_signature[1],
+ oemtable->oem_signature[2],
+ oemtable->oem_signature[3]);
+ return;
+ }
+ if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
+ {
+ printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
+ return;
+ }
+ while (count < oemtable->oem_length) {
+ switch (*oemptr) {
+ case MP_TRANSLATION:
+ {
+ struct mpc_config_translation *m=
+ (struct mpc_config_translation *)oemptr;
+ MP_translation_info(m);
+ oemptr += sizeof(*m);
+ count += sizeof(*m);
+ ++mpc_record;
+ break;
+ }
+ default:
+ {
+ printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
+ return;
+ }
+ }
+ }
+}
+
+static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
+ char *productid)
+{
+ if (strncmp(oem, "IBM NUMA", 8))
+ printk("Warning! May not be a NUMA-Q system!\n");
+ if (mpc->mpc_oemptr)
+ smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
+ mpc->mpc_oemsize);
+}
+#endif /* CONFIG_X86_NUMAQ */
+
+/*
+ * Read/parse the MPC
+ */
+
+static int __init smp_read_mpc(struct mp_config_table *mpc)
+{
+ char str[16];
+ char oem[10];
+ int count=sizeof(*mpc);
+ unsigned char *mpt=((unsigned char *)mpc)+count;
+
+ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
+ printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
+ *(u32 *)mpc->mpc_signature);
+ return 0;
+ }
+ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
+ printk(KERN_ERR "SMP mptable: checksum error!\n");
+ return 0;
+ }
+ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
+ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
+ mpc->mpc_spec);
+ return 0;
+ }
+ if (!mpc->mpc_lapic) {
+ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
+ return 0;
+ }
+ memcpy(oem,mpc->mpc_oem,8);
+ oem[8]=0;
+ printk(KERN_INFO "OEM ID: %s ",oem);
+
+ memcpy(str,mpc->mpc_productid,12);
+ str[12]=0;
+ printk("Product ID: %s ",str);
+
+ mps_oem_check(mpc, oem, str);
+
+ printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
+
+ /*
+ * Save the local APIC address (it might be non-default) -- but only
+ * if we're not using ACPI.
+ */
+ if (!acpi_lapic)
+ mp_lapic_addr = mpc->mpc_lapic;
+
+ /*
+ * Now process the configuration blocks.
+ */
+ mpc_record = 0;
+ while (count < mpc->mpc_length) {
+ switch(*mpt) {
+ case MP_PROCESSOR:
+ {
+ struct mpc_config_processor *m=
+ (struct mpc_config_processor *)mpt;
+ /* ACPI may have already provided this data */
+ if (!acpi_lapic)
+ MP_processor_info(m);
+ mpt += sizeof(*m);
+ count += sizeof(*m);
+ break;
+ }
+ case MP_BUS:
+ {
+ struct mpc_config_bus *m=
+ (struct mpc_config_bus *)mpt;
+ MP_bus_info(m);
+ mpt += sizeof(*m);
+ count += sizeof(*m);
+ break;
+ }
+ case MP_IOAPIC:
+ {
+ struct mpc_config_ioapic *m=
+ (struct mpc_config_ioapic *)mpt;
+ MP_ioapic_info(m);
+ mpt+=sizeof(*m);
+ count+=sizeof(*m);
+ break;
+ }
+ case MP_INTSRC:
+ {
+ struct mpc_config_intsrc *m=
+ (struct mpc_config_intsrc *)mpt;
+
+ MP_intsrc_info(m);
+ mpt+=sizeof(*m);
+ count+=sizeof(*m);
+ break;
+ }
+ case MP_LINTSRC:
+ {
+ struct mpc_config_lintsrc *m=
+ (struct mpc_config_lintsrc *)mpt;
+ MP_lintsrc_info(m);
+ mpt+=sizeof(*m);
+ count+=sizeof(*m);
+ break;
+ }
+ default:
+ {
+ count = mpc->mpc_length;
+ break;
+ }
+ }
+ ++mpc_record;
+ }
+ clustered_apic_check();
+ if (!num_processors)
+ printk(KERN_ERR "SMP mptable: no processors registered!\n");
+ return num_processors;
+}
+
+static int __init ELCR_trigger(unsigned int irq)
+{
+ unsigned int port;
+
+ port = 0x4d0 + (irq >> 3);
+ return (inb(port) >> (irq & 7)) & 1;
+}
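
ELCR_trigger() probes the Edge/Level Control Registers at I/O ports 0x4d0 (IRQs 0-7) and 0x4d1 (IRQs 8-15); a set bit marks the IRQ as level-triggered. The same probe works from user space given port access (root only):

    #include <stdio.h>
    #include <sys/io.h>   /* ioperm(), inb() */

    static int elcr_trigger(unsigned int irq)
    {
        unsigned int port = 0x4d0 + (irq >> 3);
        return (inb(port) >> (irq & 7)) & 1;
    }

    int main(void)
    {
        if (ioperm(0x4d0, 2, 1) != 0) {   /* needs root */
            perror("ioperm");
            return 1;
        }
        for (unsigned int irq = 0; irq < 16; irq++)
            printf("IRQ %2u: %s\n", irq,
                   elcr_trigger(irq) ? "level" : "edge");
        return 0;
    }
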
+
+static void __init construct_default_ioirq_mptable(int mpc_default_type)
+{
+ struct mpc_config_intsrc intsrc;
+ int i;
+ int ELCR_fallback = 0;
+
+ intsrc.mpc_type = MP_INTSRC;
+ intsrc.mpc_irqflag = 0; /* conforming */
+ intsrc.mpc_srcbus = 0;
+ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
+
+ intsrc.mpc_irqtype = mp_INT;
+
+ /*
+ * If true, we have an ISA/PCI system with no IRQ entries
+ * in the MP table. To prevent the PCI interrupts from being set up
+ * incorrectly, we try to use the ELCR. The sanity check to see if
+ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
+ * never be level sensitive, so we simply see if the ELCR agrees.
+ * If it does, we assume it's valid.
+ */
+ if (mpc_default_type == 5) {
+ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
+
+ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
+ printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
+ else {
+ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
+ ELCR_fallback = 1;
+ }
+ }
+
+ for (i = 0; i < 16; i++) {
+ switch (mpc_default_type) {
+ case 2:
+ if (i == 0 || i == 13)
+ continue; /* IRQ0 & IRQ13 not connected */
+ /* fall through */
+ default:
+ if (i == 2)
+ continue; /* IRQ2 is never connected */
+ }
+
+ if (ELCR_fallback) {
+ /*
+ * If the ELCR indicates a level-sensitive interrupt, we
+ * copy that information over to the MP table in the
+ * irqflag field (level sensitive, active high polarity).
+ */
+ if (ELCR_trigger(i))
+ intsrc.mpc_irqflag = 13;
+ else
+ intsrc.mpc_irqflag = 0;
+ }
+
+ intsrc.mpc_srcbusirq = i;
+ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
+ MP_intsrc_info(&intsrc);
+ }
+
+ intsrc.mpc_irqtype = mp_ExtINT;
+ intsrc.mpc_srcbusirq = 0;
+ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
+ MP_intsrc_info(&intsrc);
+}
+
+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
+{
+ struct mpc_config_processor processor;
+ struct mpc_config_bus bus;
+ struct mpc_config_ioapic ioapic;
+ struct mpc_config_lintsrc lintsrc;
+ int linttypes[2] = { mp_ExtINT, mp_NMI };
+ int i;
+
+ /*
+ * local APIC has default address
+ */
+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+
+ /*
+ * 2 CPUs, numbered 0 & 1.
+ */
+ processor.mpc_type = MP_PROCESSOR;
+ /* Either an integrated APIC or a discrete 82489DX. */
+ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
+ processor.mpc_cpuflag = CPU_ENABLED;
+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
+ (boot_cpu_data.x86_model << 4) |
+ boot_cpu_data.x86_mask;
+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
+ processor.mpc_reserved[0] = 0;
+ processor.mpc_reserved[1] = 0;
+ for (i = 0; i < 2; i++) {
+ processor.mpc_apicid = i;
+ MP_processor_info(&processor);
+ }
+
+ bus.mpc_type = MP_BUS;
+ bus.mpc_busid = 0;
+ switch (mpc_default_type) {
+ default:
+ printk("???\n");
+ printk(KERN_ERR "Unknown standard configuration %d\n",
+ mpc_default_type);
+ /* fall through */
+ case 1:
+ case 5:
+ memcpy(bus.mpc_bustype, "ISA ", 6);
+ break;
+ case 2:
+ case 6:
+ case 3:
+ memcpy(bus.mpc_bustype, "EISA ", 6);
+ break;
+ case 4:
+ case 7:
+ memcpy(bus.mpc_bustype, "MCA ", 6);
+ }
+ MP_bus_info(&bus);
+ if (mpc_default_type > 4) {
+ bus.mpc_busid = 1;
+ memcpy(bus.mpc_bustype, "PCI ", 6);
+ MP_bus_info(&bus);
+ }
+
+ ioapic.mpc_type = MP_IOAPIC;
+ ioapic.mpc_apicid = 2;
+ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
+ ioapic.mpc_flags = MPC_APIC_USABLE;
+ ioapic.mpc_apicaddr = 0xFEC00000;
+ MP_ioapic_info(&ioapic);
+
+ /*
+ * We set up most of the low 16 IO-APIC pins according to MPS rules.
+ */
+ construct_default_ioirq_mptable(mpc_default_type);
+
+ lintsrc.mpc_type = MP_LINTSRC;
+ lintsrc.mpc_irqflag = 0; /* conforming */
+ lintsrc.mpc_srcbusid = 0;
+ lintsrc.mpc_srcbusirq = 0;
+ lintsrc.mpc_destapic = MP_APIC_ALL;
+ for (i = 0; i < 2; i++) {
+ lintsrc.mpc_irqtype = linttypes[i];
+ lintsrc.mpc_destapiclint = i;
+ MP_lintsrc_info(&lintsrc);
+ }
+}
+
+static struct intel_mp_floating *mpf_found;
+
+/*
+ * Scan the memory blocks for an SMP configuration block.
+ */
+void __init get_smp_config (void)
+{
+ struct intel_mp_floating *mpf = mpf_found;
+
+ /*
+ * ACPI may be used to obtain the entire SMP configuration or just to
+ * enumerate/configure processors (CONFIG_ACPI_BOOT). Note that
+ * ACPI supports both logical (e.g. Hyper-Threading) and physical
	 * processors, whereas MPS only supports physical.
+ */
+ if (acpi_lapic && acpi_ioapic) {
+ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
+ return;
+ }
+ else if (acpi_lapic)
+ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
+
+ printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
+ if (mpf->mpf_feature2 & (1<<7)) {
+ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
+ pic_mode = 1;
+ } else {
+ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
+ pic_mode = 0;
+ }
+
+ /*
+ * Now see if we need to read further.
+ */
+ if (mpf->mpf_feature1 != 0) {
+
+ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
+ construct_default_ISA_mptable(mpf->mpf_feature1);
+
+ } else if (mpf->mpf_physptr) {
+
+ /*
+ * Read the physical hardware table. Anything here will
+ * override the defaults.
+ */
+ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
+ smp_found_config = 0;
+ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
+ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
+ return;
+ }
+ /*
+ * If there are no explicit MP IRQ entries, then we are
+ * broken. We set up most of the low 16 IO-APIC pins to
+ * ISA defaults and hope it will work.
+ */
+ if (!mp_irq_entries) {
+ struct mpc_config_bus bus;
+
+ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
+
+ bus.mpc_type = MP_BUS;
+ bus.mpc_busid = 0;
+ memcpy(bus.mpc_bustype, "ISA ", 6);
+ MP_bus_info(&bus);
+
+ construct_default_ioirq_mptable(0);
+ }
+
+ } else
+ BUG();
+
+ printk(KERN_INFO "Processors: %d\n", num_processors);
+ /*
+ * Only use the first configuration found.
+ */
+}
+
+static int __init smp_scan_config (unsigned long base, unsigned long length)
+{
+ unsigned long *bp = isa_bus_to_virt(base);
+ struct intel_mp_floating *mpf;
+
+ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
+ if (sizeof(*mpf) != 16)
+ printk("Error: MPF size\n");
+
+ while (length > 0) {
+ mpf = (struct intel_mp_floating *)bp;
+ if ((*bp == SMP_MAGIC_IDENT) &&
+ (mpf->mpf_length == 1) &&
+ !mpf_checksum((unsigned char *)bp, 16) &&
+ ((mpf->mpf_specification == 1)
+ || (mpf->mpf_specification == 4)) ) {
+
+ smp_found_config = 1;
+ printk(KERN_INFO "found SMP MP-table at %08lx\n",
+ virt_to_phys(mpf));
+ if (mpf->mpf_physptr) {
+ /*
+				 * We cannot access the MPC table to compute
+				 * its size yet, as only a few megabytes from
+				 * the bottom of memory are mapped so far.
+				 * The PC-9800's MPC table sits at the very
+				 * end of physical memory, so blindly reserving
+				 * PAGE_SIZE from mpf->mpf_physptr would BUG()
+				 * in reserve_bootmem.
+ */
+ unsigned long size = PAGE_SIZE;
+ unsigned long end = max_low_pfn * PAGE_SIZE;
+ if (mpf->mpf_physptr + size > end)
+ size = end - mpf->mpf_physptr;
+ reserve_bootmem(mpf->mpf_physptr, size);
+ }
+
+ mpf_found = mpf;
+ return 1;
+ }
+ bp += 4;
+ length -= 16;
+ }
+ return 0;
+}
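
smp_scan_config() accepts a candidate only if all four tests pass: the "_MP_" magic, a length of one 16-byte paragraph, a zero byte sum, and spec revision 1 or 4, always on a 16-byte boundary. A hedged user-space equivalent that scans the BIOS area through /dev/mem (root only; many systems restrict /dev/mem, and the byte offsets assume the standard intel_mp_floating layout with length at offset 8 and revision at offset 9):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static int checksum(const unsigned char *p, int len)
    {
        int sum = 0;
        while (len--)
            sum += *p++;
        return sum & 0xFF;
    }

    int main(void)
    {
        int fd = open("/dev/mem", O_RDONLY);
        if (fd < 0) { perror("open /dev/mem"); return 1; }

        /* Scan the 64K BIOS region, the most common home of the table. */
        unsigned char *bios = mmap(NULL, 0x10000, PROT_READ, MAP_SHARED,
                                   fd, 0xF0000);
        if (bios == MAP_FAILED) { perror("mmap"); return 1; }

        for (unsigned long off = 0; off < 0x10000; off += 16) {
            unsigned char *p = bios + off;
            if (memcmp(p, "_MP_", 4) == 0 && p[8] == 1 &&
                checksum(p, 16) == 0 && (p[9] == 1 || p[9] == 4)) {
                printf("found MP floating pointer at 0x%05lx, spec 1.%d\n",
                       0xF0000 + off, p[9]);
                return 0;
            }
        }
        printf("no MP table in 0xF0000-0xFFFFF\n");
        return 0;
    }
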
+
+void __init find_smp_config (void)
+{
+ unsigned int address;
+
+ /*
+ * FIXME: Linux assumes you have 640K of base ram..
+ * this continues the error...
+ *
+ * 1) Scan the bottom 1K for a signature
+ * 2) Scan the top 1K of base RAM
+ * 3) Scan the 64K of bios
+ */
+ if (smp_scan_config(0x0,0x400) ||
+ smp_scan_config(639*0x400,0x400) ||
+ smp_scan_config(0xF0000,0x10000))
+ return;
+ /*
+ * If it is an SMP machine we should know now, unless the
+ * configuration is in an EISA/MCA bus machine with an
+ * extended bios data area.
+ *
+	 * There is a real-mode segmented pointer to the 4K EBDA
+	 * area at 0x40E; calculate and scan it here.
+ *
+ * NOTE! There are Linux loaders that will corrupt the EBDA
+ * area, and as such this kind of SMP config may be less
+ * trustworthy, simply because the SMP table may have been
+ * stomped on during early boot. These loaders are buggy and
+ * should be fixed.
+ *
+ * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
+ */
+
+ address = get_bios_ebda();
+ if (address)
+ smp_scan_config(address, 0x400);
+}
+
+/* --------------------------------------------------------------------------
+ ACPI-based MP Configuration
+ -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_BOOT
+
+void __init mp_register_lapic_address (
+ u64 address)
+{
+#ifndef CONFIG_XEN
+ mp_lapic_addr = (unsigned long) address;
+
+ if (boot_cpu_physical_apicid == -1U)
+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+
+ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
+#endif
+}
+
+
+void __init mp_register_lapic (
+ u8 id,
+ u8 enabled)
+{
+ struct mpc_config_processor processor;
+ int boot_cpu = 0;
+
+ if (MAX_APICS - id <= 0) {
+ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
+ id, MAX_APICS);
+ return;
+ }
+
+ if (id == boot_cpu_physical_apicid)
+ boot_cpu = 1;
+
+#ifndef CONFIG_XEN
+ processor.mpc_type = MP_PROCESSOR;
+ processor.mpc_apicid = id;
+ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
+ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
+ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
+ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
+ processor.mpc_reserved[0] = 0;
+ processor.mpc_reserved[1] = 0;
+#endif
+
+ MP_processor_info(&processor);
+}
+
+#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT))
+
+#define MP_ISA_BUS 0
+#define MP_MAX_IOAPIC_PIN 127
+
+struct mp_ioapic_routing {
+ int apic_id;
+ int gsi_base;
+ int gsi_end;
+ u32 pin_programmed[4];
+} mp_ioapic_routing[MAX_IO_APICS];
+
+
+static int mp_find_ioapic (
+ int gsi)
+{
+ int i = 0;
+
+ /* Find the IOAPIC that manages this GSI. */
+ for (i = 0; i < nr_ioapics; i++) {
+ if ((gsi >= mp_ioapic_routing[i].gsi_base)
+ && (gsi <= mp_ioapic_routing[i].gsi_end))
+ return i;
+ }
+
+ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
+
+ return -1;
+}
+
+
+void __init mp_register_ioapic (
+ u8 id,
+ u32 address,
+ u32 gsi_base)
+{
+ int idx = 0;
+
+ if (nr_ioapics >= MAX_IO_APICS) {
+ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
+ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
+ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
+ }
+ if (!address) {
+ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
+ " found in MADT table, skipping!\n");
+ return;
+ }
+
+ idx = nr_ioapics++;
+
+ mp_ioapics[idx].mpc_type = MP_IOAPIC;
+ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
+ mp_ioapics[idx].mpc_apicaddr = address;
+
+ mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
+ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
+
+ /*
+ * Build basic GSI lookup table to facilitate gsi->io_apic lookups
+ * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
+ */
+ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
+ mp_ioapic_routing[idx].gsi_base = gsi_base;
+ mp_ioapic_routing[idx].gsi_end = gsi_base +
+ io_apic_get_redir_entries(idx);
+
+ printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
+ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
+ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
+ mp_ioapic_routing[idx].gsi_base,
+ mp_ioapic_routing[idx].gsi_end);
+
+ return;
+}
+
+
+void __init mp_override_legacy_irq (
+ u8 bus_irq,
+ u8 polarity,
+ u8 trigger,
+ u32 gsi)
+{
+ struct mpc_config_intsrc intsrc;
+ int ioapic = -1;
+ int pin = -1;
+
+ /*
+ * Convert 'gsi' to 'ioapic.pin'.
+ */
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0)
+ return;
+ pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+
+ /*
+ * TBD: This check is for faulty timer entries, where the override
+ * erroneously sets the trigger to level, resulting in a HUGE
+ * increase of timer interrupts!
+ */
+ if ((bus_irq == 0) && (trigger == 3))
+ trigger = 1;
+
+ intsrc.mpc_type = MP_INTSRC;
+ intsrc.mpc_irqtype = mp_INT;
+ intsrc.mpc_irqflag = (trigger << 2) | polarity;
+ intsrc.mpc_srcbus = MP_ISA_BUS;
+ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
+ intsrc.mpc_dstirq = pin; /* INTIN# */
+
+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
+ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
+
+ mp_irqs[mp_irq_entries] = intsrc;
+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
+ panic("Max # of irq sources exceeded!\n");
+
+ return;
+}
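
MP interrupt source entries pack the polarity into bits 0-1 of mpc_irqflag and the trigger mode into bits 2-3, which is why the code above builds the field as (trigger << 2) | polarity and every Dprintk unpacks it with & 3 and >> 2. A two-line round trip:

    #include <stdio.h>

    int main(void)
    {
        unsigned int trigger = 3, polarity = 1;   /* level, active low */
        unsigned int irqflag = (trigger << 2) | polarity;

        printf("irqflag = 0x%x, pol = %u, trig = %u\n",
               irqflag, irqflag & 3, (irqflag >> 2) & 3);
        return 0;
    }
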
+
+
+void __init mp_config_acpi_legacy_irqs (void)
+{
+ struct mpc_config_intsrc intsrc;
+ int i = 0;
+ int ioapic = -1;
+
+ /*
+ * Fabricate the legacy ISA bus (bus #31).
+ */
+ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
+ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
+
+ /*
+ * ES7000 has no legacy identity mappings
+ */
+ if (es7000_plat)
+ return;
+
+ /*
+ * Locate the IOAPIC that manages the ISA IRQs (0-15).
+ */
+ ioapic = mp_find_ioapic(0);
+ if (ioapic < 0)
+ return;
+
+ intsrc.mpc_type = MP_INTSRC;
+ intsrc.mpc_irqflag = 0; /* Conforming */
+ intsrc.mpc_srcbus = MP_ISA_BUS;
+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
+
+ /*
+	 * Use the default configuration for IRQs 0-15, unless
+	 * overridden by (MADT) interrupt source override entries.
+ */
+ for (i = 0; i < 16; i++) {
+ int idx;
+
+ for (idx = 0; idx < mp_irq_entries; idx++) {
+ struct mpc_config_intsrc *irq = mp_irqs + idx;
+
+ /* Do we already have a mapping for this ISA IRQ? */
+ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
+ break;
+
+ /* Do we already have a mapping for this IOAPIC pin */
+ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
+ (irq->mpc_dstirq == i))
+ break;
+ }
+
+ if (idx != mp_irq_entries) {
+ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
+ continue; /* IRQ already used */
+ }
+
+ intsrc.mpc_irqtype = mp_INT;
+ intsrc.mpc_srcbusirq = i; /* Identity mapped */
+ intsrc.mpc_dstirq = i;
+
+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
+ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
+ intsrc.mpc_dstirq);
+
+ mp_irqs[mp_irq_entries] = intsrc;
+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
+ panic("Max # of irq sources exceeded!\n");
+ }
+}
+
+int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
+{
+ int ioapic = -1;
+ int ioapic_pin = 0;
+ int idx, bit = 0;
+
+#ifdef CONFIG_ACPI_BUS
+ /* Don't set up the ACPI SCI because it's already set up */
+ if (acpi_fadt.sci_int == gsi)
+ return gsi;
+#endif
+
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0) {
+ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
+ return gsi;
+ }
+
+ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+
+ if (ioapic_renumber_irq)
+ gsi = ioapic_renumber_irq(ioapic, gsi);
+
+ /*
+ * Avoid pin reprogramming. PRTs typically include entries
+ * with redundant pin->gsi mappings (but unique PCI devices);
+ * we only program the IOAPIC on the first.
+ */
+ bit = ioapic_pin % 32;
+ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
+ if (idx > 3) {
+ printk(KERN_ERR "Invalid reference to IOAPIC pin "
+ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
+ ioapic_pin);
+ return gsi;
+ }
+ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
+ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
+ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
+ return gsi;
+ }
+
+ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
+
+ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
+ edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
+ active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+ return gsi;
+}
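
mp_register_gsi() tracks up to 128 pins per IOAPIC in the four-word pin_programmed[] array: idx selects the word, bit the position within it. (The ternary in the idx computation is equivalent to a plain ioapic_pin / 32.) A small model of the duplicate-suppression bookkeeping:

    #include <stdio.h>

    int main(void)
    {
        unsigned int pin_programmed[4] = { 0 };
        int pins[] = { 4, 35, 35, 100 };   /* the duplicate is skipped */

        for (int i = 0; i < 4; i++) {
            int pin = pins[i];
            int bit = pin % 32;
            int idx = pin / 32;   /* same as (pin < 32) ? 0 : pin / 32 */

            if (pin_programmed[idx] & (1u << bit)) {
                printf("pin %d already programmed, skipping\n", pin);
                continue;
            }
            pin_programmed[idx] |= 1u << bit;
            printf("programming pin %d (word %d, bit %d)\n", pin, idx, bit);
        }
        return 0;
    }
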
+
+#endif /*CONFIG_X86_IO_APIC && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT)*/
+#endif /*CONFIG_ACPI_BOOT*/
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c
index 9df49e66ae..0716f0c62b 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c
@@ -14,12 +14,7 @@
#include <linux/version.h>
#include <asm/io.h>
#include <asm-xen/balloon.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define pte_offset_kernel pte_offset
-#define pud_t pgd_t
-#define pud_offset(d, va) d
-#endif
+#include <asm/tlbflush.h>
struct dma_coherent_mem {
void *virt_base;
@@ -54,10 +49,10 @@ xen_contig_memory(unsigned long vstart, unsigned int order)
pmd = pmd_offset(pud, vstart + (i*PAGE_SIZE));
pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
pfn = pte_val_ma(*pte) >> PAGE_SHIFT;
- queue_l1_entry_update(pte, 0);
+ HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
+ __pte_ma(0), 0);
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
INVALID_P2M_ENTRY;
- flush_page_update_queue();
if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
&pfn, 1, 0) != 1) BUG();
}
@@ -66,41 +61,23 @@ xen_contig_memory(unsigned long vstart, unsigned int order)
&pfn, 1, order) != 1) BUG();
/* 3. Map the new extent in place of old pages. */
for (i = 0; i < (1<<order); i++) {
- pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
- pud = pud_offset(pgd, vstart + (i*PAGE_SIZE));
- pmd = pmd_offset(pud, vstart + (i*PAGE_SIZE));
- pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
- queue_l1_entry_update(pte,
- ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
- queue_machphys_update(pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+ HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
+ __pte_ma(((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0);
+ xen_machphys_update(pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = pfn+i;
}
- /* Flush updates through and flush the TLB. */
- xen_tlb_flush();
+ flush_tlb_all();
balloon_unlock(flags);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-#else
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int gfp)
-#endif
{
void *ret;
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
unsigned int order = get_order(size);
unsigned long vstart;
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
- int gfp = GFP_ATOMIC;
-
- if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
-#else
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
@@ -119,7 +96,6 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
gfp |= GFP_DMA;
-#endif
vstart = __get_free_pages(gfp, order);
ret = (void *)vstart;
@@ -133,14 +109,6 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-void pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-#else
-
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
@@ -231,5 +199,3 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-#endif
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c
index 9a0e5d1322..9061053081 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c
@@ -13,6 +13,7 @@
#include <stdarg.h>
+#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
@@ -46,8 +47,7 @@
#include <asm/i387.h>
#include <asm/irq.h>
#include <asm/desc.h>
-#include <asm-xen/multicall.h>
-#include <asm-xen/xen-public/dom0_ops.h>
+#include <asm-xen/xen-public/physdev.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif
@@ -55,6 +55,9 @@
#include <linux/irq.h>
#include <linux/err.h>
+#include <asm/tlbflush.h>
+#include <asm/cpu.h>
+
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
int hlt_counter;
@@ -113,6 +116,33 @@ void xen_idle(void)
}
}
+#ifdef CONFIG_HOTPLUG_CPU
+#include <asm/nmi.h>
+/* We don't actually take the CPU down, just spin without interrupts. */
+static inline void play_dead(void)
+{
+ /* Ack it */
+ __get_cpu_var(cpu_state) = CPU_DEAD;
+
+ /* We shouldn't have to disable interrupts while dead, but
+ * some interrupts just don't seem to go away, and this makes
+ * it "work" for testing purposes. */
+ /* Death loop */
+ while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+ HYPERVISOR_yield();
+
+ local_irq_disable();
+ __flush_tlb_all();
+ cpu_set(smp_processor_id(), cpu_online_map);
+ local_irq_enable();
+}
+#else
+static inline void play_dead(void)
+{
+ BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
@@ -131,6 +161,9 @@ void cpu_idle (void)
cpu_clear(cpu, cpu_idle_map);
rmb();
+ if (cpu_is_offline(cpu))
+ play_dead();
+
irq_stat[cpu].idle_timestamp = jiffies;
xen_idle();
}
@@ -228,20 +261,11 @@ void exit_thread(void)
/* The process may have allocated an io port bitmap... nuke it. */
if (unlikely(NULL != t->io_bitmap_ptr)) {
- int cpu = get_cpu();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
-
+ physdev_op_t op = { 0 };
+ op.cmd = PHYSDEVOP_SET_IOBITMAP;
+ HYPERVISOR_physdev_op(&op);
kfree(t->io_bitmap_ptr);
t->io_bitmap_ptr = NULL;
- /*
- * Careful, clear this in the TSS too:
- */
- memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
- t->io_bitmap_max = 0;
- tss->io_bitmap_owner = NULL;
- tss->io_bitmap_max = 0;
- tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
- put_cpu();
}
}
@@ -290,7 +314,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
struct pt_regs * childregs;
struct task_struct *tsk;
int err;
- unsigned long eflags;
childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
*childregs = *regs;
@@ -340,9 +363,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
desc->b = LDT_entry_b(&info);
}
-
- __asm__ __volatile__ ( "pushfl; popl %0" : "=r" (eflags) : );
- p->thread.io_pl = (eflags >> 12) & 3;
+ p->thread.io_pl = current->thread.io_pl;
err = 0;
out:
@@ -415,37 +436,6 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
return 1;
}
-static inline void
-handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
-{
- if (!next->io_bitmap_ptr) {
- /*
- * Disable the bitmap via an invalid offset. We still cache
- * the previous bitmap owner and the IO bitmap contents:
- */
- tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
- return;
- }
- if (likely(next == tss->io_bitmap_owner)) {
- /*
- * Previous owner of the bitmap (hence the bitmap content)
- * matches the next task, we dont have to do anything but
- * to set a valid offset in the TSS:
- */
- tss->io_bitmap_base = IO_BITMAP_OFFSET;
- return;
- }
- /*
- * Lazy TSS's I/O bitmap copy. We set an invalid offset here
- * and we let the task to get a GPF in case an I/O instruction
- * is performed. The handler of the GPF will verify that the
- * faulting task has a valid I/O bitmap and, it true, does the
- * real copy and restart the instruction. This will save us
- * redundant copies when the currently switched task does not
- * perform any I/O during its timeslice.
- */
- tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
-}
/*
* This special macro can be used to load a debugging register
*/
@@ -486,29 +476,10 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
*next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
- dom0_op_t op;
-
- /* NB. No need to disable interrupts as already done in sched.c */
- /* __cli(); */
-
- /*
- * Save away %fs and %gs. No need to save %es and %ds, as
- * those are always kernel segments while inside the kernel.
- */
- asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
- asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+ physdev_op_t iopl_op, iobmp_op;
+ multicall_entry_t _mcl[8], *mcl = _mcl;
- /*
- * We clobber FS and GS here so that we avoid a GPF when restoring
- * previous task's FS/GS values in Xen when the LDT is switched.
- */
- __asm__ __volatile__ (
- "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
- "eax" );
-
- MULTICALL_flush_page_update_queue();
-
- /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+ /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
/*
* This is basically '__unlazy_fpu', except that we queue a
@@ -517,7 +488,9 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
*/
if (prev_p->thread_info->status & TS_USEDFPU) {
__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
- queue_multicall0(__HYPERVISOR_fpu_taskswitch);
+ mcl->op = __HYPERVISOR_fpu_taskswitch;
+ mcl->args[0] = 1;
+ mcl++;
}
/*
@@ -525,35 +498,50 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
* This is load_esp0(tss, next) with a multicall.
*/
tss->esp0 = next->esp0;
- queue_multicall2(__HYPERVISOR_stack_switch, tss->ss0, tss->esp0);
+ mcl->op = __HYPERVISOR_stack_switch;
+ mcl->args[0] = tss->ss0;
+ mcl->args[1] = tss->esp0;
+ mcl++;
/*
* Load the per-thread Thread-Local Storage descriptor.
* This is load_TLS(next, cpu) with multicalls.
*/
-#define C(i) do { \
- if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
- next->tls_array[i].b != prev->tls_array[i].b)) \
- queue_multicall3(__HYPERVISOR_update_descriptor, \
- virt_to_machine(&get_cpu_gdt_table(cpu) \
- [GDT_ENTRY_TLS_MIN + i]), \
- ((u32 *)&next->tls_array[i])[0], \
- ((u32 *)&next->tls_array[i])[1]); \
+#define C(i) do { \
+ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
+ next->tls_array[i].b != prev->tls_array[i].b)) { \
+ mcl->op = __HYPERVISOR_update_descriptor; \
+ mcl->args[0] = virt_to_machine(&get_cpu_gdt_table(cpu) \
+ [GDT_ENTRY_TLS_MIN + i]); \
+ mcl->args[1] = ((u32 *)&next->tls_array[i])[0]; \
+ mcl->args[2] = ((u32 *)&next->tls_array[i])[1]; \
+ mcl++; \
+ } \
} while (0)
C(0); C(1); C(2);
#undef C
- if (xen_start_info.flags & SIF_PRIVILEGED) {
- op.cmd = DOM0_IOPL;
- op.u.iopl.domain = DOMID_SELF;
- op.u.iopl.iopl = next->io_pl;
- op.interface_version = DOM0_INTERFACE_VERSION;
- queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
+ if (unlikely(prev->io_pl != next->io_pl)) {
+ iopl_op.cmd = PHYSDEVOP_SET_IOPL;
+ iopl_op.u.set_iopl.iopl = next->io_pl;
+ mcl->op = __HYPERVISOR_physdev_op;
+ mcl->args[0] = (unsigned long)&iopl_op;
+ mcl++;
}
- /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
- execute_multicall_list();
- /* __sti(); */
+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
+ iobmp_op.cmd =
+ PHYSDEVOP_SET_IOBITMAP;
+ iobmp_op.u.set_iobitmap.bitmap =
+ (unsigned long)next->io_bitmap_ptr;
+ iobmp_op.u.set_iobitmap.nr_ports =
+ next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
+ mcl->op = __HYPERVISOR_physdev_op;
+ mcl->args[0] = (unsigned long)&iobmp_op;
+ mcl++;
+ }
+
+ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
/*
* Restore %fs and %gs if needed.
@@ -576,9 +564,6 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
loaddebug(next, 7);
}
- if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
- handle_io_bitmap(next, tss);
-
return prev_p;
}
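
The rewritten __switch_to() batches its per-switch hypercalls (FPU task switch, stack switch, TLS descriptor updates, IOPL, IO bitmap) into one HYPERVISOR_multicall, paying for a single ring transition instead of up to eight. A sketch of the cursor-based batching pattern, with a simplified stand-in for Xen's multicall_entry_t and a printf stub in place of the real hypercall:

    #include <stdio.h>

    /* Simplified stand-in for Xen's multicall_entry_t (illustrative only). */
    typedef struct {
        unsigned long op;        /* hypercall number */
        unsigned long args[7];
    } multicall_entry_t;

    enum { HC_fpu_taskswitch = 1, HC_stack_switch = 2 };

    /* Stub: the real HYPERVISOR_multicall traps into Xen once for the
     * whole batch rather than once per queued operation. */
    static void multicall(const multicall_entry_t *calls, long nr)
    {
        for (long i = 0; i < nr; i++)
            printf("hypercall %lu(%lu, %lu)\n",
                   calls[i].op, calls[i].args[0], calls[i].args[1]);
    }

    int main(void)
    {
        multicall_entry_t _mcl[8] = {{ 0 }}, *mcl = _mcl;

        /* Queue entries exactly as __switch_to() does: advance the cursor
         * only for operations this particular context switch needs. */
        mcl->op = HC_fpu_taskswitch;
        mcl->args[0] = 1;
        mcl++;

        mcl->op = HC_stack_switch;
        mcl->args[0] = 0x68;         /* ss0  (illustrative values) */
        mcl->args[1] = 0xc1000000;   /* esp0 */
        mcl++;

        multicall(_mcl, mcl - _mcl);
        return 0;
    }
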
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c
index f6096f03b7..c5f6c665fb 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c
@@ -41,6 +41,7 @@
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/kernel.h>
+#include <linux/percpu.h>
#include <linux/notifier.h>
#include <video/edid.h>
#include <asm/e820.h>
@@ -52,6 +53,7 @@
#include <asm/ist.h>
#include <asm/io.h>
#include <asm-xen/hypervisor.h>
+#include <asm-xen/xen-public/physdev.h>
#include "setup_arch_pre.h"
#include <bios_ebda.h>
@@ -287,6 +289,10 @@ static void __init probe_roms(void)
unsigned char *rom;
int i;
+ /* Nothing to do if not running in dom0. */
+ if (!(xen_start_info.flags & SIF_INITDOMAIN))
+ return;
+
/* video rom */
upper = adapter_rom_resources[0].start;
for (start = video_rom_resource.start; start < upper; start += 2048) {
@@ -357,9 +363,6 @@ EXPORT_SYMBOL(HYPERVISOR_shared_info);
unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
EXPORT_SYMBOL(phys_to_machine_mapping);
-multicall_entry_t multicall_list[8];
-int nr_multicall_ents = 0;
-
/* Raw start-of-day parameters from the hypervisor. */
union xen_start_info_union xen_start_info_union;
@@ -780,7 +783,7 @@ static void __init parse_cmdline_early (char ** cmdline_p)
noexec_setup(from + 7);
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_X86_MPPARSE
/*
* If the BIOS enumerates physical processors before logical,
* maxcpus=N at enumeration-time can be used to disable HT.
@@ -1134,12 +1137,6 @@ static unsigned long __init setup_memory(void)
*/
acpi_reserve_bootmem();
#endif
-#ifdef CONFIG_X86_FIND_SMP_CONFIG
- /*
- * Find and reserve possible boot-time SMP configuration:
- */
- find_smp_config();
-#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (xen_start_info.mod_start) {
@@ -1220,8 +1217,9 @@ static void __init register_memory(void)
else
legacy_init_iomem_resources(&code_resource, &data_resource);
- /* EFI systems may still have VGA */
- request_resource(&iomem_resource, &video_ram_resource);
+ if (xen_start_info.flags & SIF_INITDOMAIN)
+ /* EFI systems may still have VGA */
+ request_resource(&iomem_resource, &video_ram_resource);
/* request I/O space for devices used on all i[345]86 PCs */
for (i = 0; i < STANDARD_IO_RESOURCES; i++)
@@ -1397,6 +1395,7 @@ static void set_mca_bus(int x) { }
void __init setup_arch(char **cmdline_p)
{
int i, j;
+ physdev_op_t op;
unsigned long max_low_pfn;
/* Force a quick death if the kernel panics. */
@@ -1408,6 +1407,8 @@ void __init setup_arch(char **cmdline_p)
notifier_chain_register(&panic_notifier_list, &xen_panic_block);
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_writable_pagetables);
memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
early_cpu_init();
@@ -1501,6 +1502,13 @@ void __init setup_arch(char **cmdline_p)
#endif
paging_init();
+#ifdef CONFIG_X86_FIND_SMP_CONFIG
+ /*
+ * Find and reserve possible boot-time SMP configuration:
+ */
+ find_smp_config();
+#endif
+
/* Make sure we have a correctly sized P->M table. */
if (max_pfn != xen_start_info.nr_pages) {
phys_to_machine_mapping = alloc_bootmem_low_pages(
@@ -1564,6 +1572,18 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled)
efi_map_memmap();
+ op.cmd = PHYSDEVOP_SET_IOPL;
+ op.u.set_iopl.iopl = current->thread.io_pl = 1;
+ HYPERVISOR_physdev_op(&op);
+
+#ifdef CONFIG_ACPI_BOOT
+ if (!(xen_start_info.flags & SIF_INITDOMAIN)) {
+ printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
+ acpi_disabled = 1;
+ acpi_ht = 0;
+ }
+#endif
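
The two hunks above replace the old DOM0_IOPL dom0_op (removed further down in this file) with the PHYSDEVOP_SET_IOPL physdev op, issued unconditionally. For context, a minimal error-checked sketch of the same call — a hypothetical helper, assuming the physdev_op_t layout used above and the usual zero-on-success hypercall convention:

	/* Hypothetical helper, not part of this patch. */
	static int set_guest_iopl(int iopl)
	{
		physdev_op_t op;

		op.cmd = PHYSDEVOP_SET_IOPL;
		op.u.set_iopl.iopl = iopl;
		return HYPERVISOR_physdev_op(&op); /* assumed: 0 on success */
	}
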
+
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
@@ -1581,17 +1601,6 @@ void __init setup_arch(char **cmdline_p)
register_memory();
- /* If we are a privileged guest OS then we should request IO privs. */
- if (xen_start_info.flags & SIF_PRIVILEGED) {
- dom0_op_t op;
- op.cmd = DOM0_IOPL;
- op.u.iopl.domain = DOMID_SELF;
- op.u.iopl.iopl = 1;
- if (HYPERVISOR_dom0_op(&op) != 0)
- panic("Unable to obtain IOPL, despite SIF_PRIVILEGED");
- current->thread.io_pl = 1;
- }
-
if (xen_start_info.flags & SIF_INITDOMAIN) {
if (!(xen_start_info.flags & SIF_PRIVILEGED))
panic("Xen granted us console access "
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smp.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smp.c
new file mode 100644
index 0000000000..fddadbba25
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smp.c
@@ -0,0 +1,624 @@
+/*
+ * Intel SMP support routines.
+ *
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ */
+
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+
+#include <asm/mtrr.h>
+#include <asm/tlbflush.h>
+#if 0
+#include <mach_apic.h>
+#endif
+#include <asm-xen/evtchn.h>
+
+#define xxprint(msg) HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg), msg)
+
+/*
+ * Some notes on x86 processor bugs affecting SMP operation:
+ *
+ * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
+ * The Linux implications for SMP are handled as follows:
+ *
+ * Pentium III / [Xeon]
+ * None of the E1AP-E3AP errata are visible to the user.
+ *
+ * E1AP. see PII A1AP
+ * E2AP. see PII A2AP
+ * E3AP. see PII A3AP
+ *
+ * Pentium II / [Xeon]
+ * None of the A1AP-A3AP errata are visible to the user.
+ *
+ * A1AP. see PPro 1AP
+ * A2AP. see PPro 2AP
+ * A3AP. see PPro 7AP
+ *
+ * Pentium Pro
+ * None of 1AP-9AP errata are visible to the normal user,
+ * except occasional delivery of 'spurious interrupt' as trap #15.
+ * This is very rare and a non-problem.
+ *
+ * 1AP. Linux maps APIC as non-cacheable
+ * 2AP. worked around in hardware
+ * 3AP. fixed in C0 and above steppings microcode update.
+ * Linux does not use excessive STARTUP_IPIs.
+ * 4AP. worked around in hardware
+ * 5AP. symmetric IO mode (normal Linux operation) not affected.
+ * 'noapic' mode has vector 0xf filled out properly.
+ * 6AP. 'noapic' mode might be affected - fixed in later steppings
+ * 7AP. We do not assume writes to the LVT deasserting IRQs
+ * 8AP. We do not enable low power mode (deep sleep) during MP bootup
+ * 9AP. We do not use mixed mode
+ *
+ * Pentium
+ * There is a marginal case where REP MOVS on 100MHz SMP
+ * machines with B stepping processors can fail. XXX should provide
+ * an L1cache=Writethrough or L1cache=off option.
+ *
+ * B stepping CPUs may hang. There are hardware work arounds
+ * for this. We warn about it in case your board doesn't have the work
+ * arounds. Basically that's so I can tell anyone with a B stepping
+ * CPU and SMP problems "tough".
+ *
+ * Specific items [From Pentium Processor Specification Update]
+ *
+ * 1AP. Linux doesn't use remote read
+ * 2AP. Linux doesn't trust APIC errors
+ * 3AP. We work around this
+ * 4AP. Linux never generated 3 interrupts of the same priority
+ * to cause a lost local interrupt.
+ * 5AP. Remote read is never used
+ * 6AP. not affected - worked around in hardware
+ * 7AP. not affected - worked around in hardware
+ * 8AP. worked around in hardware - we get explicit CS errors if not
+ * 9AP. only 'noapic' mode affected. Might generate spurious
+ * interrupts, we log only the first one and count the
+ * rest silently.
+ * 10AP. not affected - worked around in hardware
+ * 11AP. Linux reads the APIC between writes to avoid this, as per
+ * the documentation. Make sure you preserve this as it affects
+ * the C stepping chips too.
+ * 12AP. not affected - worked around in hardware
+ * 13AP. not affected - worked around in hardware
+ * 14AP. we always deassert INIT during bootup
+ * 15AP. not affected - worked around in hardware
+ * 16AP. not affected - worked around in hardware
+ * 17AP. not affected - worked around in hardware
+ * 18AP. not affected - worked around in hardware
+ * 19AP. not affected - worked around in BIOS
+ *
+ * If this sounds worrying, believe me these bugs are either ___RARE___,
+ * or are signal timing bugs worked around in hardware, and there's
+ * nothing of note from the C stepping upwards.
+ */
+
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
+
+/*
+ * the following functions deal with sending IPIs between CPUs.
+ *
+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
+ */
+
+static inline int __prepare_ICR (unsigned int shortcut, int vector)
+{
+ return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
+}
+
+static inline int __prepare_ICR2 (unsigned int mask)
+{
+ return SET_APIC_DEST_FIELD(mask);
+}
+
+DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
+
+static inline void __send_IPI_one(unsigned int cpu, int vector)
+{
+ unsigned int evtchn;
+
+ evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
+ // printk("send_IPI_mask_bitmask cpu %d vector %d evtchn %d\n", cpu, vector, evtchn);
+ if (evtchn) {
+#if 0
+ shared_info_t *s = HYPERVISOR_shared_info;
+ while (synch_test_bit(evtchn, &s->evtchn_pending[0]) ||
+ synch_test_bit(evtchn, &s->evtchn_mask[0]))
+ ;
+#endif
+ notify_via_evtchn(evtchn);
+ } else
+		printk("send_IPI to unbound port %d/%d\n",
+ cpu, vector);
+}
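
Under Xen there is no APIC ICR to program: a cross-CPU "interrupt" is a notification on the event channel that was bound to the (cpu, vector) pair when the IPI IRQs were set up (see smp_intr_init() at the end of this file). A caller-side sketch, assuming the per-CPU ipi_to_evtchn table declared above:

	/* Illustrative only: kicking one CPU costs a single hypercall. */
	static void kick_cpu(unsigned int cpu)
	{
		__send_IPI_one(cpu, RESCHEDULE_VECTOR); /* notify_via_evtchn() inside */
	}
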
+
+void __send_IPI_shortcut(unsigned int shortcut, int vector)
+{
+ int cpu;
+
+ switch (shortcut) {
+ case APIC_DEST_SELF:
+ __send_IPI_one(smp_processor_id(), vector);
+ break;
+ case APIC_DEST_ALLBUT:
+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+ if (cpu_isset(cpu, cpu_online_map)) {
+ __send_IPI_one(cpu, vector);
+ }
+ }
+ break;
+ default:
+ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
+ vector);
+ break;
+ }
+}
+
+void fastcall send_IPI_self(int vector)
+{
+ __send_IPI_shortcut(APIC_DEST_SELF, vector);
+}
+
+/*
+ * This is only used on smaller machines.
+ */
+void send_IPI_mask_bitmask(cpumask_t mask, int vector)
+{
+ unsigned long flags;
+ unsigned int cpu;
+
+ local_irq_save(flags);
+ WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
+
+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+ if (cpu_isset(cpu, mask)) {
+ __send_IPI_one(cpu, vector);
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+{
+ send_IPI_mask_bitmask(mask, vector);
+}
+
+#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
+
+#if 0 /* XEN */
+/*
+ * Smarter SMP flushing macros.
+ * c/o Linus Torvalds.
+ *
+ * These mean you can really definitely utterly forget about
+ * writing to user space from interrupts. (It's not allowed anyway).
+ *
+ * Optimizations Manfred Spraul <manfred@colorfullife.com>
+ */
+
+static cpumask_t flush_cpumask;
+static struct mm_struct * flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+#define FLUSH_ALL 0xffffffff
+
+/*
+ * We cannot call mmdrop() because we are in interrupt context,
+ * instead update mm->cpu_vm_mask.
+ *
+ * We need to reload %cr3 since the page tables may be going
+ * away from under us..
+ */
+static inline void leave_mm (unsigned long cpu)
+{
+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
+ BUG();
+ cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
+ load_cr3(swapper_pg_dir);
+}
+
+/*
+ *
+ * The flush IPI assumes that a thread switch happens in this order:
+ * [cpu0: the cpu that switches]
+ * 1) switch_mm() either 1a) or 1b)
+ * 1a) thread switch to a different mm
+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
+ * Stop ipi delivery for the old mm. This is not synchronized with
+ * the other cpus, but smp_invalidate_interrupt ignores flush ipis
+ * for the wrong mm, and in the worst case we perform a superfluous
+ * tlb flush.
+ * 1a2) set cpu_tlbstate to TLBSTATE_OK
+ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
+ * was in lazy tlb mode.
+ * 1a3) update cpu_tlbstate[].active_mm
+ * Now cpu0 accepts tlb flushes for the new mm.
+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
+ * Now the other cpus will send tlb flush ipis.
+ * 1a4) change cr3.
+ * 1b) thread switch without mm change
+ * cpu_tlbstate[].active_mm is correct, cpu0 already handles
+ * flush ipis.
+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
+ * Atomically set the bit [other cpus will start sending flush ipis],
+ * and test the bit.
+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
+ * 2) switch %%esp, ie current
+ *
+ * The interrupt must handle 2 special cases:
+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
+ * runs in kernel space, the cpu could load tlb entries for user space
+ * pages.
+ *
+ * The good news is that cpu_tlbstate is local to each cpu, no
+ * write/read ordering problems.
+ */
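
A condensed sketch of case 1b above, i.e. what the architecture's switch_mm()/lazy-TLB path is expected to do. This is context for the ordering rules, not code introduced by this patch, and the exact helper names (cpu_test_and_set() and friends) are assumptions:

	/* 1b: thread switch without an mm change (sketch). */
	per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;       /* 1b1 */
	if (!cpu_test_and_set(cpu, mm->cpu_vm_mask))          /* 1b2 */
		/* Bit was clear: leave_mm() ran meanwhile, TLB is stale. */
		load_cr3(mm->pgd);                            /* 1b3 */
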
+
+/*
+ * TLB flush IPI:
+ *
+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
+ * 2) Leave the mm if we are in the lazy tlb mode.
+ */
+
+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ unsigned long cpu;
+
+ cpu = get_cpu();
+
+ if (!cpu_isset(cpu, flush_cpumask))
+ goto out;
+ /*
+ * This was a BUG() but until someone can quote me the
+ * line from the intel manual that guarantees an IPI to
+ * multiple CPUs is retried _only_ on the erroring CPUs
+ * its staying as a return
+ *
+ * BUG();
+ */
+
+ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
+ if (flush_va == FLUSH_ALL)
+ local_flush_tlb();
+ else
+ __flush_tlb_one(flush_va);
+ } else
+ leave_mm(cpu);
+ }
+ smp_mb__before_clear_bit();
+ cpu_clear(cpu, flush_cpumask);
+ smp_mb__after_clear_bit();
+out:
+ put_cpu_no_resched();
+
+ return IRQ_HANDLED;
+}
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va)
+{
+ /*
+ * A couple of (to be removed) sanity checks:
+ *
+ * - current CPU must not be in mask
+ * - mask must exist :)
+ */
+ BUG_ON(cpus_empty(cpumask));
+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ BUG_ON(!mm);
+
+ /* If a CPU which we ran on has gone down, OK. */
+ cpus_and(cpumask, cpumask, cpu_online_map);
+ if (cpus_empty(cpumask))
+ return;
+
+ /*
+ * i'm not happy about this global shared spinlock in the
+ * MM hot path, but we'll see how contended it is.
+ * Temporarily this turns IRQs off, so that lockups are
+ * detected by the NMI watchdog.
+ */
+ spin_lock(&tlbstate_lock);
+
+ flush_mm = mm;
+ flush_va = va;
+#if NR_CPUS <= BITS_PER_LONG
+ atomic_set_mask(cpumask, &flush_cpumask);
+#else
+ {
+ int k;
+ unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
+ unsigned long *cpu_mask = (unsigned long *)&cpumask;
+ for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
+ atomic_set_mask(cpu_mask[k], &flush_mask[k]);
+ }
+#endif
+ /*
+ * We have to send the IPI only to
+ * CPUs affected.
+ */
+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
+
+ while (!cpus_empty(flush_cpumask))
+ /* nothing. lockup detection does not belong here */
+ mb();
+
+ flush_mm = NULL;
+ flush_va = 0;
+ spin_unlock(&tlbstate_lock);
+}
+
+void flush_tlb_current_task(void)
+{
+ struct mm_struct *mm = current->mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ preempt_enable();
+}
+
+void flush_tlb_mm (struct mm_struct * mm)
+{
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ if (current->active_mm == mm) {
+ if (current->mm)
+ local_flush_tlb();
+ else
+ leave_mm(smp_processor_id());
+ }
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ if (current->active_mm == mm) {
+ if(current->mm)
+ __flush_tlb_one(va);
+ else
+ leave_mm(smp_processor_id());
+ }
+
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, va);
+
+ preempt_enable();
+}
+
+static void do_flush_tlb_all(void* info)
+{
+ unsigned long cpu = smp_processor_id();
+
+ __flush_tlb_all();
+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
+ leave_mm(cpu);
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+}
+
+#else
+
+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{ return 0; }
+void flush_tlb_current_task(void)
+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
+void flush_tlb_mm(struct mm_struct * mm)
+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
+void flush_tlb_all(void)
+{ xen_tlb_flush_all(); }
+
+#endif /* XEN */
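
In the Xen build the entire shootdown protocol above is compiled out: remote TLBs are invalidated by the hypervisor on our behalf, so each flush_tlb_* call collapses to one hypercall-backed helper applied to every CPU in the mm's cpu_vm_mask, current CPU included. Callers are identical in both builds; a trivial hypothetical usage sketch:

	/* Works unchanged on native and Xen kernels. */
	static void drop_mm_translations(struct mm_struct *mm)
	{
		flush_tlb_mm(mm); /* Xen: xen_tlb_flush_mask(&mm->cpu_vm_mask) */
	}
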
+
+/*
+ * this function sends a 'reschedule' IPI to another CPU.
+ * it goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+void smp_send_reschedule(int cpu)
+{
+ WARN_ON(cpu_is_offline(cpu));
+ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+}
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+static DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+ void (*func) (void *info);
+ void *info;
+ atomic_t started;
+ atomic_t finished;
+ int wait;
+};
+
+static struct call_data_struct * call_data;
+
+/*
+ * this function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+ */
+
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+/*
+ * [SUMMARY] Run a function on all other CPUs.
+ * <func> The function to run. This must be fast and non-blocking.
+ * <info> An arbitrary pointer to pass to the function.
+ * <nonatomic> currently unused.
+ * <wait> If true, wait (atomically) until function has completed on other CPUs.
+ * [RETURNS] 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>>, or have already executed it.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+{
+ struct call_data_struct data;
+ int cpus;
+
+ /* Holding any lock stops cpus from going down. */
+ spin_lock(&call_lock);
+ cpus = num_online_cpus()-1;
+
+ if (!cpus) {
+ spin_unlock(&call_lock);
+ return 0;
+ }
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ call_data = &data;
+ mb();
+
+ /* Send a message to all other CPUs and wait for them to respond */
+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+
+ /* Wait for response */
+ while (atomic_read(&data.started) != cpus)
+ barrier();
+
+ if (wait)
+ while (atomic_read(&data.finished) != cpus)
+ barrier();
+ spin_unlock(&call_lock);
+
+ return 0;
+}
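
A usage sketch for the four-argument 2.6-era interface above; the names here are hypothetical, and the documented constraints apply (interrupts enabled, not in IRQ or BH context):

	static void bump(void *info)
	{
		atomic_inc((atomic_t *)info);
	}

	static void poke_all_other_cpus(void)
	{
		static atomic_t hits = ATOMIC_INIT(0);

		/* nonatomic=0 (unused), wait=1: block until bump() ran everywhere. */
		if (smp_call_function(bump, &hits, 0, 1) == 0)
			printk(KERN_DEBUG "bump() ran on %d other CPUs\n",
			       atomic_read(&hits));
	}
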
+
+static void stop_this_cpu (void * dummy)
+{
+ /*
+ * Remove this CPU:
+ */
+ cpu_clear(smp_processor_id(), cpu_online_map);
+ local_irq_disable();
+#if 1
+ xxprint("stop_this_cpu disable_local_APIC\n");
+#else
+ disable_local_APIC();
+#endif
+ if (cpu_data[smp_processor_id()].hlt_works_ok)
+ for(;;) __asm__("hlt");
+ for (;;);
+}
+
+/*
+ * this function calls the 'stop' function on all other CPUs in the system.
+ */
+
+void smp_send_stop(void)
+{
+ smp_call_function(stop_this_cpu, NULL, 1, 0);
+
+ local_irq_disable();
+#if 1
+ xxprint("smp_send_stop disable_local_APIC\n");
+#else
+ disable_local_APIC();
+#endif
+ local_irq_enable();
+}
+
+/*
+ * Reschedule call back. Nothing to do,
+ * all the work is done automatically when
+ * we return from the interrupt.
+ */
+irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ return IRQ_HANDLED;
+}
+
+#include <linux/kallsyms.h>
+irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ void (*func) (void *info) = call_data->func;
+ void *info = call_data->info;
+ int wait = call_data->wait;
+
+ /*
+ * Notify initiating CPU that I've grabbed the data and am
+ * about to execute the function
+ */
+ mb();
+ atomic_inc(&call_data->started);
+ /*
+ * At this point the info structure may be out of scope unless wait==1
+ */
+ irq_enter();
+ (*func)(info);
+ irq_exit();
+
+ if (wait) {
+ mb();
+ atomic_inc(&call_data->finished);
+ }
+
+ return IRQ_HANDLED;
+}
+
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
new file mode 100644
index 0000000000..a716095376
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
@@ -0,0 +1,1437 @@
+/*
+ * x86 SMP booting functions
+ *
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *
+ * Much of the core SMP work is based on previous work by Thomas Radke, to
+ * whom a great many thanks are extended.
+ *
+ * Thanks to Intel for making available several different Pentium,
+ * Pentium Pro and Pentium-II/Xeon MP machines.
+ * Original development of Linux SMP code supported by Caldera.
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ *
+ * Fixes
+ * Felix Koop : NR_CPUS used properly
+ * Jose Renau : Handle single CPU case.
+ * Alan Cox : By repeated request 8) - Total BogoMIPS report.
+ * Greg Wright : Fix for kernel stacks panic.
+ * Erich Boleyn : MP v1.4 and additional changes.
+ * Matthias Sattler : Changes for 2.1 kernel map.
+ * Michel Lespinasse : Changes for 2.1 kernel map.
+ * Michael Chastain : Change trampoline.S to gnu as.
+ * Alan Cox : Dumb bug: 'B' step PPro's are fine
+ * Ingo Molnar : Added APIC timers, based on code
+ * from Jose Renau
+ * Ingo Molnar : various cleanups and rewrites
+ * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs
+ * Martin J. Bligh : Added support for multi-quad systems
+ * Dave Jones : Report invalid combinations of Athlon CPUs.
+ * Rusty Russell : Hacked into shape for new "hotplug" boot process.
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/smp_lock.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
+
+#include <linux/delay.h>
+#include <linux/mc146818rtc.h>
+#include <asm/tlbflush.h>
+#include <asm/desc.h>
+#include <asm/arch_hooks.h>
+
+#ifndef CONFIG_X86_IO_APIC
+#define Dprintk(args...)
+#endif
+#include <mach_wakecpu.h>
+#include <smpboot_hooks.h>
+
+/* Set if we find a B stepping CPU */
+static int __initdata smp_b_stepping;
+
+/* Number of siblings per CPU package */
+int smp_num_siblings = 1;
+int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
+EXPORT_SYMBOL(phys_proc_id);
+
+/* bitmap of online cpus */
+cpumask_t cpu_online_map;
+
+cpumask_t cpu_callin_map;
+cpumask_t cpu_callout_map;
+static cpumask_t smp_commenced_mask;
+
+/* Per CPU bogomips and other parameters */
+struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+
+u8 x86_cpu_to_apicid[NR_CPUS] =
+ { [0 ... NR_CPUS-1] = 0xff };
+EXPORT_SYMBOL(x86_cpu_to_apicid);
+
+/* Set when the idlers are all forked */
+int smp_threads_ready;
+
+#if 0
+/*
+ * Trampoline 80x86 program as an array.
+ */
+
+extern unsigned char trampoline_data [];
+extern unsigned char trampoline_end [];
+static unsigned char *trampoline_base;
+static int trampoline_exec;
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+#endif
+
+#if 0
+/*
+ * Currently trivial. Write the real->protected mode
+ * bootstrap into the page concerned. The caller
+ * has made sure it's suitably aligned.
+ */
+
+static unsigned long __init setup_trampoline(void)
+{
+ memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
+ return virt_to_phys(trampoline_base);
+}
+#endif
+
+/*
+ * We are called very early to get the low memory for the
+ * SMP bootup trampoline page.
+ */
+void __init smp_alloc_memory(void)
+{
+#if 1
+ int cpu;
+
+ for (cpu = 1; cpu < NR_CPUS; cpu++) {
+ cpu_gdt_descr[cpu].address = (unsigned long)
+ alloc_bootmem_low_pages(PAGE_SIZE);
+ /* XXX free unused pages later */
+ }
+#else
+ trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
+ /*
+ * Has to be in very low memory so we can execute
+ * real-mode AP code.
+ */
+ if (__pa(trampoline_base) >= 0x9F000)
+ BUG();
+ /*
+ * Make the SMP trampoline executable:
+ */
+ trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
+#endif
+}
+
+/*
+ * The bootstrap kernel entry code has set these up. Save them for
+ * a given CPU
+ */
+
+static void __init smp_store_cpu_info(int id)
+{
+ struct cpuinfo_x86 *c = cpu_data + id;
+
+ *c = boot_cpu_data;
+ if (id!=0)
+ identify_cpu(c);
+ /*
+ * Mask B, Pentium, but not Pentium MMX
+ */
+ if (c->x86_vendor == X86_VENDOR_INTEL &&
+ c->x86 == 5 &&
+ c->x86_mask >= 1 && c->x86_mask <= 4 &&
+ c->x86_model <= 3)
+ /*
+ * Remember we have B step Pentia with bugs
+ */
+ smp_b_stepping = 1;
+
+ /*
+ * Certain Athlons might work (for various values of 'work') in SMP
+ * but they are not certified as MP capable.
+ */
+ if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
+
+ /* Athlon 660/661 is valid. */
+ if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
+ goto valid_k7;
+
+ /* Duron 670 is valid */
+ if ((c->x86_model==7) && (c->x86_mask==0))
+ goto valid_k7;
+
+ /*
+	 * Athlon 662, Duron 671, and Athlon >model 7 have the capability bit.
+	 * It's worth noting that the A5 stepping (662) of some Athlon XPs
+	 * has the MP bit set.
+ * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
+ */
+ if (((c->x86_model==6) && (c->x86_mask>=2)) ||
+ ((c->x86_model==7) && (c->x86_mask>=1)) ||
+ (c->x86_model> 7))
+ if (cpu_has_mp)
+ goto valid_k7;
+
+ /* If we get here, it's not a certified SMP capable AMD system. */
+ tainted |= TAINT_UNSAFE_SMP;
+ }
+
+valid_k7:
+ ;
+}
+
+#if 0
+/*
+ * TSC synchronization.
+ *
+ * We first check whether all CPUs have their TSC's synchronized,
+ * then we print a warning if not, and always resync.
+ */
+
+static atomic_t tsc_start_flag = ATOMIC_INIT(0);
+static atomic_t tsc_count_start = ATOMIC_INIT(0);
+static atomic_t tsc_count_stop = ATOMIC_INIT(0);
+static unsigned long long tsc_values[NR_CPUS];
+
+#define NR_LOOPS 5
+
+static void __init synchronize_tsc_bp (void)
+{
+ int i;
+ unsigned long long t0;
+ unsigned long long sum, avg;
+ long long delta;
+ unsigned long one_usec;
+ int buggy = 0;
+
+ printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
+
+ /* convert from kcyc/sec to cyc/usec */
+ one_usec = cpu_khz / 1000;
+
+ atomic_set(&tsc_start_flag, 1);
+ wmb();
+
+ /*
+ * We loop a few times to get a primed instruction cache,
+ * then the last pass is more or less synchronized and
+ * the BP and APs set their cycle counters to zero all at
+ * once. This reduces the chance of having random offsets
+ * between the processors, and guarantees that the maximum
+ * delay between the cycle counters is never bigger than
+ * the latency of information-passing (cachelines) between
+ * two CPUs.
+ */
+ for (i = 0; i < NR_LOOPS; i++) {
+ /*
+ * all APs synchronize but they loop on '== num_cpus'
+ */
+ while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
+ mb();
+ atomic_set(&tsc_count_stop, 0);
+ wmb();
+ /*
+ * this lets the APs save their current TSC:
+ */
+ atomic_inc(&tsc_count_start);
+
+ rdtscll(tsc_values[smp_processor_id()]);
+ /*
+ * We clear the TSC in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_tsc(0, 0);
+
+ /*
+ * Wait for all APs to leave the synchronization point:
+ */
+ while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
+ mb();
+ atomic_set(&tsc_count_start, 0);
+ wmb();
+ atomic_inc(&tsc_count_stop);
+ }
+
+ sum = 0;
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_isset(i, cpu_callout_map)) {
+ t0 = tsc_values[i];
+ sum += t0;
+ }
+ }
+ avg = sum;
+ do_div(avg, num_booting_cpus());
+
+ sum = 0;
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+ delta = tsc_values[i] - avg;
+ if (delta < 0)
+ delta = -delta;
+ /*
+ * We report bigger than 2 microseconds clock differences.
+ */
+ if (delta > 2*one_usec) {
+ long realdelta;
+ if (!buggy) {
+ buggy = 1;
+ printk("\n");
+ }
+ realdelta = delta;
+ do_div(realdelta, one_usec);
+ if (tsc_values[i] < avg)
+ realdelta = -realdelta;
+
+ printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
+ }
+
+ sum += delta;
+ }
+ if (!buggy)
+ printk("passed.\n");
+}
+
+static void __init synchronize_tsc_ap (void)
+{
+ int i;
+
+ /*
+ * Not every cpu is online at the time
+ * this gets called, so we first wait for the BP to
+ * finish SMP initialization:
+ */
+ while (!atomic_read(&tsc_start_flag)) mb();
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ atomic_inc(&tsc_count_start);
+ while (atomic_read(&tsc_count_start) != num_booting_cpus())
+ mb();
+
+ rdtscll(tsc_values[smp_processor_id()]);
+ if (i == NR_LOOPS-1)
+ write_tsc(0, 0);
+
+ atomic_inc(&tsc_count_stop);
+ while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
+ }
+}
+#undef NR_LOOPS
+#endif
+
+extern void calibrate_delay(void);
+
+static atomic_t init_deasserted;
+
+void __init smp_callin(void)
+{
+ int cpuid, phys_id;
+ unsigned long timeout;
+
+#if 0
+ /*
+	 * If woken up by an INIT in an 82489DX configuration
+ * we may get here before an INIT-deassert IPI reaches
+ * our local APIC. We have to wait for the IPI or we'll
+ * lock up on an APIC access.
+ */
+ wait_for_init_deassert(&init_deasserted);
+#endif
+
+ /*
+ * (This works even if the APIC is not enabled.)
+ */
+ phys_id = smp_processor_id();
+ cpuid = smp_processor_id();
+ if (cpu_isset(cpuid, cpu_callin_map)) {
+ printk("huh, phys CPU#%d, CPU#%d already present??\n",
+ phys_id, cpuid);
+ BUG();
+ }
+ Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
+
+ /*
+ * STARTUP IPIs are fragile beasts as they might sometimes
+ * trigger some glue motherboard logic. Complete APIC bus
+	 * silence for 1 second; this overestimates the time the
+ * boot CPU is spending to send the up to 2 STARTUP IPIs
+ * by a factor of two. This should be enough.
+ */
+
+ /*
+ * Waiting 2s total for startup (udelay is not yet working)
+ */
+ timeout = jiffies + 2*HZ;
+ while (time_before(jiffies, timeout)) {
+ /*
+		 * Has the boot CPU finished its STARTUP sequence?
+ */
+ if (cpu_isset(cpuid, cpu_callout_map))
+ break;
+ rep_nop();
+ }
+
+ if (!time_before(jiffies, timeout)) {
+ printk("BUG: CPU%d started up but did not get a callout!\n",
+ cpuid);
+ BUG();
+ }
+
+#if 0
+ /*
+ * the boot CPU has finished the init stage and is spinning
+ * on callin_map until we finish. We are free to set up this
+ * CPU, first the APIC. (this is probably redundant on most
+ * boards)
+ */
+
+ Dprintk("CALLIN, before setup_local_APIC().\n");
+ smp_callin_clear_local_apic();
+ setup_local_APIC();
+#endif
+ map_cpu_to_logical_apicid();
+
+ /*
+ * Get our bogomips.
+ */
+ calibrate_delay();
+ Dprintk("Stack at about %p\n",&cpuid);
+
+ /*
+ * Save our processor parameters
+ */
+ smp_store_cpu_info(cpuid);
+
+#if 0
+ disable_APIC_timer();
+#endif
+
+ /*
+ * Allow the master to continue.
+ */
+ cpu_set(cpuid, cpu_callin_map);
+
+#if 0
+ /*
+ * Synchronize the TSC with the BP
+ */
+ if (cpu_has_tsc && cpu_khz)
+ synchronize_tsc_ap();
+#endif
+}
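
For orientation, the rendezvous between the boot processor (BP, in do_boot_cpu() below) and this freshly started CPU (AP) reduces to two bitmaps; a rough timeline, with the Xen-specific VCPU kick replacing the native STARTUP IPIs:

	BP: HYPERVISOR_boot_vcpu(cpu, &ctxt)        AP: cpu_init(), smp_callin()
	BP: cpu_set(cpu, cpu_callout_map)     --->  AP: sees callout, calibrates
	BP: polls cpu_callin_map (up to 5s)   <---  AP: cpu_set(cpu, cpu_callin_map)
	BP: cpu_set(cpu, smp_commenced_mask)  --->  AP: start_secondary() proceeds
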
+
+int cpucount;
+
+
+static irqreturn_t ldebug_interrupt(
+ int irq, void *dev_id, struct pt_regs *regs)
+{
+ return IRQ_HANDLED;
+}
+
+static DEFINE_PER_CPU(int, ldebug_irq);
+static char ldebug_name[NR_CPUS][15];
+
+void ldebug_setup(void)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(ldebug_irq, cpu) = bind_virq_to_irq(VIRQ_DEBUG);
+ sprintf(ldebug_name[cpu], "ldebug%d", cpu);
+ BUG_ON(request_irq(per_cpu(ldebug_irq, cpu), ldebug_interrupt,
+ SA_INTERRUPT, ldebug_name[cpu], NULL));
+}
+
+
+extern void local_setup_timer(void);
+
+/*
+ * Activate a secondary processor.
+ */
+static void __init start_secondary(void *unused)
+{
+ /*
+	 * Don't put anything before smp_callin(); SMP
+	 * booting is so fragile that we want to limit the
+	 * things done here to the bare minimum.
+ */
+ cpu_init();
+ smp_callin();
+ while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+ rep_nop();
+ local_setup_timer();
+ ldebug_setup();
+ smp_intr_init();
+ local_irq_enable();
+ /*
+ * low-memory mappings have been cleared, flush them from
+ * the local TLBs too.
+ */
+ local_flush_tlb();
+ cpu_set(smp_processor_id(), cpu_online_map);
+
+ /* We can take interrupts now: we're officially "up". */
+ local_irq_enable();
+
+ wmb();
+ cpu_idle();
+}
+
+/*
+ * Everything has been set up for the secondary
+ * CPUs - they just need to reload everything
+ * from the task structure
+ * This function must not return.
+ */
+void __init initialize_secondary(void)
+{
+ /*
+ * We don't actually need to load the full TSS,
+ * basically just the stack pointer and the eip.
+ */
+
+ asm volatile(
+ "movl %0,%%esp\n\t"
+ "jmp *%1"
+ :
+ :"r" (current->thread.esp),"r" (current->thread.eip));
+}
+
+extern struct {
+ void * esp;
+ unsigned short ss;
+} stack_start;
+
+#ifdef CONFIG_NUMA
+
+/* which logical CPUs are on which nodes */
+cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
+ { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
+/* which node each logical CPU is on */
+int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
+EXPORT_SYMBOL(cpu_2_node);
+
+/* set up a mapping between cpu and node. */
+static inline void map_cpu_to_node(int cpu, int node)
+{
+ printk("Mapping cpu %d to node %d\n", cpu, node);
+ cpu_set(cpu, node_2_cpu_mask[node]);
+ cpu_2_node[cpu] = node;
+}
+
+/* undo a mapping between cpu and node. */
+static inline void unmap_cpu_to_node(int cpu)
+{
+ int node;
+
+ printk("Unmapping cpu %d from all nodes\n", cpu);
+ for (node = 0; node < MAX_NUMNODES; node ++)
+ cpu_clear(cpu, node_2_cpu_mask[node]);
+ cpu_2_node[cpu] = 0;
+}
+#else /* !CONFIG_NUMA */
+
+#define map_cpu_to_node(cpu, node) ({})
+#define unmap_cpu_to_node(cpu) ({})
+
+#endif /* CONFIG_NUMA */
+
+u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+
+void map_cpu_to_logical_apicid(void)
+{
+ int cpu = smp_processor_id();
+ int apicid = smp_processor_id();
+
+ cpu_2_logical_apicid[cpu] = apicid;
+ map_cpu_to_node(cpu, apicid_to_node(apicid));
+}
+
+void unmap_cpu_to_logical_apicid(int cpu)
+{
+ cpu_2_logical_apicid[cpu] = BAD_APICID;
+ unmap_cpu_to_node(cpu);
+}
+
+#if APIC_DEBUG
+static inline void __inquire_remote_apic(int apicid)
+{
+ int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
+ char *names[] = { "ID", "VERSION", "SPIV" };
+ int timeout, status;
+
+ printk("Inquiring remote APIC #%d...\n", apicid);
+
+ for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
+ printk("... APIC #%d %s: ", apicid, names[i]);
+
+ /*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
+
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
+ apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
+
+ timeout = 0;
+ do {
+ udelay(100);
+ status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
+ } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
+
+ switch (status) {
+ case APIC_ICR_RR_VALID:
+ status = apic_read(APIC_RRR);
+ printk("%08x\n", status);
+ break;
+ default:
+ printk("failed\n");
+ }
+ }
+}
+#endif
+
+#if 0
+#ifdef WAKE_SECONDARY_VIA_NMI
+/*
+ * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
+ * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
+ * won't ... remember to clear down the APIC, etc later.
+ */
+static int __init
+wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
+{
+ unsigned long send_status = 0, accept_status = 0;
+ int timeout, maxlvt;
+
+ /* Target chip */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
+
+ /* Boot on the stack */
+ /* Kick the second */
+ apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
+
+ /*
+ * Give the other CPU some time to accept the IPI.
+ */
+ udelay(200);
+ /*
+ * Due to the Pentium erratum 3AP.
+ */
+ maxlvt = get_maxlvt();
+ if (maxlvt > 3) {
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ }
+ accept_status = (apic_read(APIC_ESR) & 0xEF);
+ Dprintk("NMI sent.\n");
+
+ if (send_status)
+ printk("APIC never delivered???\n");
+ if (accept_status)
+ printk("APIC delivery error (%lx).\n", accept_status);
+
+ return (send_status | accept_status);
+}
+#endif /* WAKE_SECONDARY_VIA_NMI */
+
+#ifdef WAKE_SECONDARY_VIA_INIT
+static int __init
+wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
+{
+ unsigned long send_status = 0, accept_status = 0;
+ int maxlvt, timeout, num_starts, j;
+
+ /*
+ * Be paranoid about clearing APIC errors.
+ */
+ if (APIC_INTEGRATED(apic_version[phys_apicid])) {
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ apic_read(APIC_ESR);
+ }
+
+ Dprintk("Asserting INIT.\n");
+
+ /*
+ * Turn INIT on target chip
+ */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+
+ /*
+ * Send IPI
+ */
+ apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
+ | APIC_DM_INIT);
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
+
+ mdelay(10);
+
+ Dprintk("Deasserting INIT.\n");
+
+ /* Target chip */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+
+ /* Send IPI */
+ apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
+
+ atomic_set(&init_deasserted, 1);
+
+ /*
+ * Should we send STARTUP IPIs ?
+ *
+ * Determine this based on the APIC version.
+ * If we don't have an integrated APIC, don't send the STARTUP IPIs.
+ */
+ if (APIC_INTEGRATED(apic_version[phys_apicid]))
+ num_starts = 2;
+ else
+ num_starts = 0;
+
+ /*
+ * Run STARTUP IPI loop.
+ */
+ Dprintk("#startup loops: %d.\n", num_starts);
+
+ maxlvt = get_maxlvt();
+
+ for (j = 1; j <= num_starts; j++) {
+ Dprintk("Sending STARTUP #%d.\n",j);
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ apic_read(APIC_ESR);
+ Dprintk("After apic_write.\n");
+
+ /*
+ * STARTUP IPI
+ */
+
+ /* Target chip */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+
+ /* Boot on the stack */
+ /* Kick the second */
+ apic_write_around(APIC_ICR, APIC_DM_STARTUP
+ | (start_eip >> 12));
+
+ /*
+ * Give the other CPU some time to accept the IPI.
+ */
+ udelay(300);
+
+ Dprintk("Startup point 1.\n");
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
+
+ /*
+ * Give the other CPU some time to accept the IPI.
+ */
+ udelay(200);
+ /*
+ * Due to the Pentium erratum 3AP.
+ */
+ if (maxlvt > 3) {
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ }
+ accept_status = (apic_read(APIC_ESR) & 0xEF);
+ if (send_status || accept_status)
+ break;
+ }
+ Dprintk("After Startup.\n");
+
+ if (send_status)
+ printk("APIC never delivered???\n");
+ if (accept_status)
+ printk("APIC delivery error (%lx).\n", accept_status);
+
+ return (send_status | accept_status);
+}
+#endif /* WAKE_SECONDARY_VIA_INIT */
+#endif
+
+extern cpumask_t cpu_initialized;
+
+static int __init do_boot_cpu(int apicid)
+/*
+ * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
+ * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
+ * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
+ */
+{
+ struct task_struct *idle;
+ unsigned long boot_error;
+ int timeout, cpu;
+ unsigned long start_eip;
+#if 0
+ unsigned short nmi_high = 0, nmi_low = 0;
+#endif
+ vcpu_guest_context_t ctxt;
+ extern void startup_32_smp(void);
+ extern void hypervisor_callback(void);
+ extern void failsafe_callback(void);
+ extern void smp_trap_init(trap_info_t *);
+ int i;
+
+ cpu = ++cpucount;
+ /*
+	 * We can't use kernel_thread since we must avoid
+	 * rescheduling the child.
+ */
+ idle = fork_idle(cpu);
+ if (IS_ERR(idle))
+ panic("failed fork for CPU %d", cpu);
+ idle->thread.eip = (unsigned long) start_secondary;
+ /* start_eip had better be page-aligned! */
+ start_eip = (unsigned long)startup_32_smp;
+
+ /* So we see what's up */
+ printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
+ /* Stack for startup_32 can be just as for start_secondary onwards */
+ stack_start.esp = (void *) idle->thread.esp;
+
+ irq_ctx_init(cpu);
+
+ /*
+ * This grunge runs the startup process for
+ * the targeted processor.
+ */
+
+ atomic_set(&init_deasserted, 0);
+
+#if 1
+ if (cpu_gdt_descr[0].size > PAGE_SIZE)
+ BUG();
+ cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
+ memcpy((void *)cpu_gdt_descr[cpu].address,
+ (void *)cpu_gdt_descr[0].address, cpu_gdt_descr[0].size);
+
+ memset(&ctxt, 0, sizeof(ctxt));
+
+ ctxt.user_regs.ds = __USER_DS;
+ ctxt.user_regs.es = __USER_DS;
+ ctxt.user_regs.fs = 0;
+ ctxt.user_regs.gs = 0;
+ ctxt.user_regs.ss = __KERNEL_DS;
+ ctxt.user_regs.cs = __KERNEL_CS;
+ ctxt.user_regs.eip = start_eip;
+ ctxt.user_regs.esp = idle->thread.esp;
+ ctxt.user_regs.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);
+
+ /* FPU is set up to default initial state. */
+ memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
+
+ /* Virtual IDT is empty at start-of-day. */
+ for ( i = 0; i < 256; i++ )
+ {
+ ctxt.trap_ctxt[i].vector = i;
+ ctxt.trap_ctxt[i].cs = FLAT_KERNEL_CS;
+ }
+ smp_trap_init(ctxt.trap_ctxt);
+
+ /* No LDT. */
+ ctxt.ldt_ents = 0;
+
+ {
+ unsigned long va;
+ int f;
+
+ for (va = cpu_gdt_descr[cpu].address, f = 0;
+ va < cpu_gdt_descr[cpu].address + cpu_gdt_descr[cpu].size;
+ va += PAGE_SIZE, f++) {
+ ctxt.gdt_frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+ make_page_readonly((void *)va);
+ }
+ ctxt.gdt_ents = cpu_gdt_descr[cpu].size / 8;
+ }
+
+ /* Ring 1 stack is the initial stack. */
+ ctxt.kernel_ss = __KERNEL_DS;
+ ctxt.kernel_sp = idle->thread.esp;
+
+ /* Callback handlers. */
+ ctxt.event_callback_cs = __KERNEL_CS;
+ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
+ ctxt.failsafe_callback_cs = __KERNEL_CS;
+ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
+
+ ctxt.pt_base = (unsigned long)virt_to_machine(swapper_pg_dir);
+
+ boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
+
+ if (!boot_error) {
+ /*
+ * allow APs to start initializing.
+ */
+ Dprintk("Before Callout %d.\n", cpu);
+ cpu_set(cpu, cpu_callout_map);
+ Dprintk("After Callout %d.\n", cpu);
+
+ /*
+ * Wait 5s total for a response
+ */
+ for (timeout = 0; timeout < 50000; timeout++) {
+ if (cpu_isset(cpu, cpu_callin_map))
+ break; /* It has booted */
+ udelay(100);
+ }
+
+ if (cpu_isset(cpu, cpu_callin_map)) {
+ /* number CPUs logically, starting from 1 (BSP is 0) */
+ Dprintk("OK.\n");
+ printk("CPU%d: ", cpu);
+ print_cpu_info(&cpu_data[cpu]);
+ Dprintk("CPU has booted.\n");
+ } else {
+			boot_error = 1;
+ }
+ }
+ x86_cpu_to_apicid[cpu] = apicid;
+ if (boot_error) {
+ /* Try to put things back the way they were before ... */
+ unmap_cpu_to_logical_apicid(cpu);
+ cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
+ cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
+ cpucount--;
+ }
+
+#else
+ Dprintk("Setting warm reset code and vector.\n");
+
+ store_NMI_vector(&nmi_high, &nmi_low);
+
+ smpboot_setup_warm_reset_vector(start_eip);
+
+ /*
+ * Starting actual IPI sequence...
+ */
+ boot_error = wakeup_secondary_cpu(apicid, start_eip);
+
+ if (!boot_error) {
+ /*
+ * allow APs to start initializing.
+ */
+ Dprintk("Before Callout %d.\n", cpu);
+ cpu_set(cpu, cpu_callout_map);
+ Dprintk("After Callout %d.\n", cpu);
+
+ /*
+ * Wait 5s total for a response
+ */
+ for (timeout = 0; timeout < 50000; timeout++) {
+ if (cpu_isset(cpu, cpu_callin_map))
+ break; /* It has booted */
+ udelay(100);
+ }
+
+ if (cpu_isset(cpu, cpu_callin_map)) {
+ /* number CPUs logically, starting from 1 (BSP is 0) */
+ Dprintk("OK.\n");
+ printk("CPU%d: ", cpu);
+ print_cpu_info(&cpu_data[cpu]);
+ Dprintk("CPU has booted.\n");
+ } else {
+			boot_error = 1;
+ if (*((volatile unsigned char *)trampoline_base)
+ == 0xA5)
+ /* trampoline started but...? */
+ printk("Stuck ??\n");
+ else
+ /* trampoline code not run */
+ printk("Not responding.\n");
+ inquire_remote_apic(apicid);
+ }
+ }
+ x86_cpu_to_apicid[cpu] = apicid;
+ if (boot_error) {
+ /* Try to put things back the way they were before ... */
+ unmap_cpu_to_logical_apicid(cpu);
+ cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
+ cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
+ cpucount--;
+ }
+
+ /* mark "stuck" area as not stuck */
+ *((volatile unsigned long *)trampoline_base) = 0;
+#endif
+
+ return boot_error;
+}
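
Stripped of error handling and the GDT/trap-table plumbing, the Xen bring-up above needs surprisingly little state. A minimal sketch using the same helpers as the function above; in practice the GDT frames, trap contexts and callback handlers it installs are also required:

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.user_regs.cs  = __KERNEL_CS;
	ctxt.user_regs.eip = start_eip;            /* AP entry point */
	ctxt.user_regs.ss  = __KERNEL_DS;
	ctxt.user_regs.esp = idle->thread.esp;     /* idle task's stack */
	ctxt.kernel_ss     = __KERNEL_DS;          /* stack used on traps */
	ctxt.kernel_sp     = idle->thread.esp;
	ctxt.pt_base       = (unsigned long)virt_to_machine(swapper_pg_dir);
	boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt); /* assumed: 0 on success */
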
+
+cycles_t cacheflush_time;
+unsigned long cache_decay_ticks;
+
+static void smp_tune_scheduling (void)
+{
+ unsigned long cachesize; /* kB */
+ unsigned long bandwidth = 350; /* MB/s */
+ /*
+ * Rough estimation for SMP scheduling, this is the number of
+ * cycles it takes for a fully memory-limited process to flush
+ * the SMP-local cache.
+ *
+ * (For a P5 this pretty much means we will choose another idle
+ * CPU almost always at wakeup time (this is due to the small
+ * L1 cache), on PIIs it's around 50-100 usecs, depending on
+ * the cache size)
+ */
+
+ if (!cpu_khz) {
+ /*
+ * this basically disables processor-affinity
+ * scheduling on SMP without a TSC.
+ */
+ cacheflush_time = 0;
+ return;
+ } else {
+ cachesize = boot_cpu_data.x86_cache_size;
+ if (cachesize == -1) {
+ cachesize = 16; /* Pentiums, 2x8kB cache */
+ bandwidth = 100;
+ }
+
+ cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
+ }
+
+ cache_decay_ticks = (long)cacheflush_time/cpu_khz + 1;
+
+ printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
+ (long)cacheflush_time/(cpu_khz/1000),
+ ((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
+ printk("task migration cache decay timeout: %ld msecs.\n",
+ cache_decay_ticks);
+}
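
Worked example of the estimate above: on a 500 MHz CPU (cpu_khz = 500000) with a 256 kB cache, cacheflush_time = (500000>>10) * (256<<10) / 350 = 488 * 262144 / 350, roughly 365,500 cycles, or about 731 usecs in the printk; cache_decay_ticks then comes out as 365500/500000 + 1 = 1 tick. With no TSC (cpu_khz == 0) the estimate is simply disabled.
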
+
+/*
+ * Cycle through the processors sending APIC IPIs to boot each.
+ */
+
+#if 0
+static int boot_cpu_logical_apicid;
+#endif
+/* Where the IO area was mapped on multiquad, always 0 otherwise */
+void *xquad_portio;
+
+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+
+static void __init smp_boot_cpus(unsigned int max_cpus)
+{
+ int cpu, kicked;
+ unsigned long bogosum = 0;
+#if 0
+ int apicid, bit;
+#endif
+
+ /*
+ * Setup boot CPU information
+ */
+ smp_store_cpu_info(0); /* Final full version of the data */
+ printk("CPU%d: ", 0);
+ print_cpu_info(&cpu_data[0]);
+
+#if 0
+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+ boot_cpu_logical_apicid = logical_smp_processor_id();
+ x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
+#else
+ // boot_cpu_physical_apicid = 0;
+ // boot_cpu_logical_apicid = 0;
+ x86_cpu_to_apicid[0] = 0;
+#endif
+
+ current_thread_info()->cpu = 0;
+ smp_tune_scheduling();
+ cpus_clear(cpu_sibling_map[0]);
+ cpu_set(0, cpu_sibling_map[0]);
+
+#ifdef CONFIG_X86_IO_APIC
+ /*
+ * If we couldn't find an SMP configuration at boot time,
+ * get out of here now!
+ */
+ if (!smp_found_config && !acpi_lapic) {
+ printk(KERN_NOTICE "SMP motherboard not detected.\n");
+ smpboot_clear_io_apic_irqs();
+#if 0
+ phys_cpu_present_map = physid_mask_of_physid(0);
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (APIC_init_uniprocessor())
+ printk(KERN_NOTICE "Local APIC not detected."
+ " Using dummy APIC emulation.\n");
+#endif
+ map_cpu_to_logical_apicid();
+ return;
+ }
+#endif
+
+#if 0
+ /*
+ * Should not be necessary because the MP table should list the boot
+ * CPU too, but we do it for the sake of robustness anyway.
+ * Makes no sense to do this check in clustered apic mode, so skip it
+ */
+ if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
+ printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
+ boot_cpu_physical_apicid);
+ physid_set(hard_smp_processor_id(), phys_cpu_present_map);
+ }
+
+ /*
+ * If we couldn't find a local APIC, then get out of here now!
+ */
+ if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
+ printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
+ boot_cpu_physical_apicid);
+ printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
+ smpboot_clear_io_apic_irqs();
+ phys_cpu_present_map = physid_mask_of_physid(0);
+ return;
+ }
+
+ verify_local_APIC();
+#endif
+
+ /*
+ * If SMP should be disabled, then really disable it!
+ */
+ if (!max_cpus) {
+ HYPERVISOR_shared_info->n_vcpu = 1;
+ printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
+ smpboot_clear_io_apic_irqs();
+#if 0
+ phys_cpu_present_map = physid_mask_of_physid(0);
+#endif
+ return;
+ }
+
+ smp_intr_init();
+
+#if 0
+ connect_bsp_APIC();
+ setup_local_APIC();
+#endif
+ map_cpu_to_logical_apicid();
+#if 0
+
+
+ setup_portio_remap();
+
+ /*
+ * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
+ *
+	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
+ * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
+ * clustered apic ID.
+ */
+ Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
+#endif
+ Dprintk("CPU present map: %lx\n",
+ (1UL << HYPERVISOR_shared_info->n_vcpu) - 1);
+
+ kicked = 1;
+ for (cpu = 1; kicked < NR_CPUS &&
+ cpu < HYPERVISOR_shared_info->n_vcpu; cpu++) {
+ if (max_cpus <= cpucount+1)
+ continue;
+
+ if (do_boot_cpu(cpu))
+ printk("CPU #%d not responding - cannot use it.\n",
+ cpu);
+ else
+ ++kicked;
+ }
+
+#if 0
+ /*
+ * Cleanup possible dangling ends...
+ */
+ smpboot_restore_warm_reset_vector();
+#endif
+
+ /*
+ * Allow the user to impress friends.
+ */
+ Dprintk("Before bogomips.\n");
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ if (cpu_isset(cpu, cpu_callout_map))
+ bogosum += cpu_data[cpu].loops_per_jiffy;
+ printk(KERN_INFO
+ "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+ cpucount+1,
+ bogosum/(500000/HZ),
+ (bogosum/(5000/HZ))%100);
+
+ Dprintk("Before bogocount - setting activated=1.\n");
+
+ if (smp_b_stepping)
+ printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
+
+ /*
+ * Don't taint if we are running SMP kernel on a single non-MP
+ * approved Athlon
+ */
+ if (tainted & TAINT_UNSAFE_SMP) {
+ if (cpucount)
+ printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
+ else
+ tainted &= ~TAINT_UNSAFE_SMP;
+ }
+
+ Dprintk("Boot done.\n");
+
+ /*
+ * construct cpu_sibling_map[], so that we can tell sibling CPUs
+ * efficiently.
+ */
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ cpus_clear(cpu_sibling_map[cpu]);
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ int siblings = 0;
+ int i;
+ if (!cpu_isset(cpu, cpu_callout_map))
+ continue;
+
+ if (smp_num_siblings > 1) {
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+ if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ siblings++;
+ cpu_set(i, cpu_sibling_map[cpu]);
+ }
+ }
+ } else {
+ siblings++;
+ cpu_set(cpu, cpu_sibling_map[cpu]);
+ }
+
+ if (siblings != smp_num_siblings)
+ printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
+ }
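
Worked example: with smp_num_siblings == 2 and four booted CPUs whose phys_proc_id is {0, 0, 1, 1}, the loop above yields cpu_sibling_map[0] = cpu_sibling_map[1] = {0,1} and cpu_sibling_map[2] = cpu_sibling_map[3] = {2,3}, with siblings == 2 on every pass, so the warning stays quiet.
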
+
+#if 0
+ if (nmi_watchdog == NMI_LOCAL_APIC)
+ check_nmi_watchdog();
+#endif
+
+ smpboot_setup_io_apic();
+
+#if 0
+ setup_boot_APIC_clock();
+
+ /*
+ * Synchronize the TSC with the AP
+ */
+ if (cpu_has_tsc && cpucount && cpu_khz)
+ synchronize_tsc_bp();
+#endif
+}
+
+/* These are wrappers to interface to the new boot process. Someone
+ who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ smp_commenced_mask = cpumask_of_cpu(0);
+ cpu_callin_map = cpumask_of_cpu(0);
+ mb();
+ smp_boot_cpus(max_cpus);
+}
+
+void __devinit smp_prepare_boot_cpu(void)
+{
+ cpu_set(smp_processor_id(), cpu_online_map);
+ cpu_set(smp_processor_id(), cpu_callout_map);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* must be called with the cpucontrol mutex held */
+static int __devinit cpu_enable(unsigned int cpu)
+{
+ /* get the target out of its holding state */
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+ wmb();
+
+ /* wait for the processor to ack it. timeout? */
+ while (!cpu_online(cpu))
+ cpu_relax();
+
+ fixup_irqs(cpu_online_map);
+ /* counter the disable in fixup_irqs() */
+ local_irq_enable();
+ return 0;
+}
+
+int __cpu_disable(void)
+{
+ cpumask_t map = cpu_online_map;
+ int cpu = smp_processor_id();
+
+ /*
+ * Perhaps use cpufreq to drop frequency, but that could go
+ * into generic code.
+ *
+	 * We won't take down the boot processor on i386 because some
+	 * interrupts can only be serviced by the BSP.
+ * Especially so if we're not using an IOAPIC -zwane
+ */
+ if (cpu == 0)
+ return -EBUSY;
+
+ /* Allow any queued timer interrupts to get serviced */
+ local_irq_enable();
+ mdelay(1);
+ local_irq_disable();
+
+ cpu_clear(cpu, map);
+ fixup_irqs(map);
+ /* It's now safe to remove this processor from the online map */
+ cpu_clear(cpu, cpu_online_map);
+ return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+ /* We don't do anything here: idle task is faking death itself. */
+ unsigned int i;
+
+ for (i = 0; i < 10; i++) {
+ /* They ack this in play_dead by setting CPU_DEAD */
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+ return;
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(HZ/10);
+ }
+ printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+ return -ENOSYS;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+ /* We said "no" in __cpu_disable */
+ BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+int __devinit __cpu_up(unsigned int cpu)
+{
+ /* In case one didn't come up */
+ if (!cpu_isset(cpu, cpu_callin_map)) {
+ printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
+ local_irq_enable();
+ return -EIO;
+ }
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Already up, and in cpu_quiescent now? */
+ if (cpu_isset(cpu, smp_commenced_mask)) {
+ cpu_enable(cpu);
+ return 0;
+ }
+#endif
+
+ local_irq_enable();
+ /* Unleash the CPU! */
+ cpu_set(cpu, smp_commenced_mask);
+ while (!cpu_isset(cpu, cpu_online_map))
+ mb();
+ return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+#if 1
+#else
+#ifdef CONFIG_X86_IO_APIC
+ setup_ioapic_dest();
+#endif
+ zap_low_mappings();
+ /*
+ * Disable executability of the SMP trampoline:
+ */
+ set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
+#endif
+}
+
+extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
+extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
+
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static char resched_name[NR_CPUS][15];
+static char callfunc_name[NR_CPUS][15];
+
+void __init smp_intr_init(void)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(resched_irq, cpu) =
+ bind_ipi_on_cpu_to_irq(cpu, RESCHEDULE_VECTOR);
+ sprintf(resched_name[cpu], "resched%d", cpu);
+ BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
+ SA_INTERRUPT, resched_name[cpu], NULL));
+
+ per_cpu(callfunc_irq, cpu) =
+ bind_ipi_on_cpu_to_irq(cpu, CALL_FUNCTION_VECTOR);
+ sprintf(callfunc_name[cpu], "callfunc%d", cpu);
+ BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
+ smp_call_function_interrupt,
+ SA_INTERRUPT, callfunc_name[cpu], NULL));
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/time.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/time.c
index bf8e8f9d90..e490bbb629 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/time.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/time.c
@@ -47,6 +47,7 @@
#include <linux/efi.h>
#include <linux/mca.h>
#include <linux/sysctl.h>
+#include <linux/percpu.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -76,7 +77,20 @@ u64 jiffies_64 = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
+#if defined(__x86_64__)
+unsigned long vxtime_hz = PIT_TICK_RATE;
+struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
+volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
+struct timespec __xtime __section_xtime;
+struct timezone __sys_tz __section_sys_tz;
+#endif
+
+#if defined(__x86_64__)
+unsigned int cpu_khz; /* Detected as we calibrate the TSC */
+#else
unsigned long cpu_khz; /* Detected as we calibrate the TSC */
+#endif
extern unsigned long wall_jiffies;
@@ -94,7 +108,6 @@ u32 shadow_tsc_stamp;
u64 shadow_system_time;
static u32 shadow_time_version;
static struct timeval shadow_tv;
-extern u64 processed_system_time;
/*
* We use this to ensure that gettimeofday() is monotonically increasing. We
@@ -111,7 +124,8 @@ static long last_rtc_update, last_update_to_xen;
static long last_update_from_xen; /* UTC seconds when last read Xen clock. */
/* Keep track of last time we did processing/updating of jiffies and xtime. */
-u64 processed_system_time; /* System time (ns) at last processing. */
+static u64 processed_system_time; /* System time (ns) at last processing. */
+static DEFINE_PER_CPU(u64, processed_system_time);
#define NS_PER_TICK (1000000000ULL/HZ)
@@ -378,39 +392,52 @@ static inline void do_timer_interrupt(int irq, void *dev_id,
struct pt_regs *regs)
{
time_t wtm_sec, sec;
- s64 delta, nsec;
+ s64 delta, delta_cpu, nsec;
long sec_diff, wtm_nsec;
+ int cpu = smp_processor_id();
do {
__get_time_values_from_xen();
- delta = (s64)(shadow_system_time +
- ((s64)cur_timer->get_offset() *
- (s64)NSEC_PER_USEC) -
- processed_system_time);
+ delta = delta_cpu = (s64)shadow_system_time +
+ ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC);
+ delta -= processed_system_time;
+ delta_cpu -= per_cpu(processed_system_time, cpu);
}
while (!TIME_VALUES_UP_TO_DATE);
- if (unlikely(delta < 0)) {
- printk("Timer ISR: Time went backwards: %lld %lld %lld %lld\n",
- delta, shadow_system_time,
+ if (unlikely(delta < 0) || unlikely(delta_cpu < 0)) {
+ printk("Timer ISR/%d: Time went backwards: "
+ "delta=%lld cpu_delta=%lld shadow=%lld "
+ "off=%lld processed=%lld cpu_processed=%lld\n",
+ cpu, delta, delta_cpu, shadow_system_time,
((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC),
- processed_system_time);
+ processed_system_time,
+ per_cpu(processed_system_time, cpu));
+ for (cpu = 0; cpu < num_online_cpus(); cpu++)
+ printk(" %d: %lld\n", cpu,
+ per_cpu(processed_system_time, cpu));
return;
}
- /* Process elapsed jiffies since last call. */
+ /* System-wide jiffy work. */
while (delta >= NS_PER_TICK) {
delta -= NS_PER_TICK;
processed_system_time += NS_PER_TICK;
do_timer(regs);
-#ifndef CONFIG_SMP
+ }
+
+ /* Local CPU jiffy work. */
+ while (delta_cpu >= NS_PER_TICK) {
+ delta_cpu -= NS_PER_TICK;
+ per_cpu(processed_system_time, cpu) += NS_PER_TICK;
update_process_times(user_mode(regs));
-#endif
- if (regs)
- profile_tick(CPU_PROFILING, regs);
+ profile_tick(CPU_PROFILING, regs);
}
+ if (cpu != 0)
+ return;
+
/*
* Take synchronised time from Xen once a minute if we're not
* synchronised ourselves, and we haven't chosen to keep an independent
@@ -618,10 +645,10 @@ void __init hpet_time_init(void)
#endif
/* Dynamically-mapped IRQ. */
-static int TIMER_IRQ;
+static DEFINE_PER_CPU(int, timer_irq);
static struct irqaction irq_timer = {
- timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
+ timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer0",
NULL, NULL
};
@@ -643,14 +670,23 @@ void __init time_init(void)
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
processed_system_time = shadow_system_time;
+ per_cpu(processed_system_time, 0) = processed_system_time;
if (timer_tsc_init.init(NULL) != 0)
BUG();
printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
- TIMER_IRQ = bind_virq_to_irq(VIRQ_TIMER);
+#if defined(__x86_64__)
+ vxtime.mode = VXTIME_TSC;
+ vxtime.quot = (1000000L << 32) / vxtime_hz;
+ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
+ vxtime.hz = vxtime_hz;
+ sync_core();
+ rdtscll(vxtime.last_tsc);
+#endif
- (void)setup_irq(TIMER_IRQ, &irq_timer);
+ per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER);
+ (void)setup_irq(per_cpu(timer_irq, 0), &irq_timer);
}
/* Convert jiffies to system time. Call with xtime_lock held for reading. */
@@ -674,18 +710,28 @@ int set_timeout_timer(void)
u64 alarm = 0;
int ret = 0;
unsigned long j;
+#ifdef CONFIG_SMP
+ unsigned long seq;
+#endif
/*
* This is safe against long blocking (since calculations are
* not based on TSC deltas). It is also safe against warped
* system time since suspend-resume is cooperative and we
- * would first get locked out. It is safe against normal
- * updates of jiffies since interrupts are off.
+ * would first get locked out.
*/
+#ifdef CONFIG_SMP
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ j = jiffies + 1;
+ alarm = __jiffies_to_st(j);
+ } while (read_seqretry(&xtime_lock, seq));
+#else
j = next_timer_interrupt();
if (j < (jiffies + 1))
j = jiffies + 1;
alarm = __jiffies_to_st(j);
+#endif
/* Failure is pretty bad, but we'd best soldier on. */
if ( HYPERVISOR_set_timer_op(alarm) != 0 )
@@ -710,6 +756,7 @@ void time_resume(void)
/* Reset our own concept of passage of system time. */
processed_system_time = shadow_system_time;
+ per_cpu(processed_system_time, 0) = processed_system_time;
/* Accept a warp in UTC (wall-clock) time. */
last_seen_tv.tv_sec = 0;
@@ -718,6 +765,24 @@ void time_resume(void)
last_update_from_xen = 0;
}
+#ifdef CONFIG_SMP
+static char timer_name[NR_CPUS][15];
+void local_setup_timer(void)
+{
+ int seq, cpu = smp_processor_id();
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ per_cpu(processed_system_time, cpu) = shadow_system_time;
+ } while (read_seqretry(&xtime_lock, seq));
+
+ per_cpu(timer_irq, cpu) = bind_virq_to_irq(VIRQ_TIMER);
+ sprintf(timer_name[cpu], "timer%d", cpu);
+ BUG_ON(request_irq(per_cpu(timer_irq, cpu), timer_interrupt,
+ SA_INTERRUPT, timer_name[cpu], NULL));
+}
+#endif
+
/*
* /proc/sys/xen: This really belongs in another file. It can stay here for
* now however.
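
The time.c hunks above split timekeeping in two: the single processed_system_time, now static and advanced only by CPU 0, drives jiffies/xtime, while a per-CPU copy lets each processor account its own slice of Xen system time to update_process_times() and the profiler. A condensed sketch of the resulting interrupt-path logic (shadow-time loop and error reporting elided; not a drop-in replacement):

    /* Condensed from do_timer_interrupt() above. */
    static void consume_system_time(s64 delta, s64 delta_cpu,
                                    int cpu, struct pt_regs *regs)
    {
        while (delta >= NS_PER_TICK) {          /* system-wide work */
            delta -= NS_PER_TICK;
            processed_system_time += NS_PER_TICK;
            do_timer(regs);
        }
        while (delta_cpu >= NS_PER_TICK) {      /* this CPU only */
            delta_cpu -= NS_PER_TICK;
            per_cpu(processed_system_time, cpu) += NS_PER_TICK;
            update_process_times(user_mode(regs));
            profile_tick(CPU_PROFILING, regs);
        }
    }

Each secondary CPU binds its own VIRQ_TIMER through local_setup_timer(), which is why the static irq_timer action is now labelled "timer0" and the remaining names come from the timer_name[] table.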
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/traps.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/traps.c
index 47396aa186..539c1d5b7d 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/traps.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/traps.c
@@ -465,14 +465,7 @@ fastcall void do_general_protection(struct pt_regs * regs, long error_code)
unsigned long ldt;
__asm__ __volatile__ ("sldt %0" : "=r" (ldt));
if (ldt == 0) {
- mmu_update_t u;
- u.ptr = MMU_EXTENDED_COMMAND;
- u.ptr |= (unsigned long)&default_ldt[0];
- u.val = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
- if (unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0)) {
- show_trace(NULL, (unsigned long *)&u);
- panic("Failed to install default LDT");
- }
+ xen_set_ldt((unsigned long)&default_ldt[0], 5);
return;
}
}
@@ -616,6 +609,14 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
nmi_enter();
cpu = smp_processor_id();
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (!cpu_online(cpu)) {
+ nmi_exit();
+ return;
+ }
+#endif
+
++nmi_count(cpu);
if (!nmi_callback(regs, cpu))
@@ -893,15 +894,7 @@ asmlinkage void math_state_restore(struct pt_regs regs)
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = thread->task;
- /*
- * A trap in kernel mode can be ignored. It'll be the fast XOR or
- * copying libraries, which will correctly save/restore state and
- * reset the TS bit in CR0.
- */
- if ((regs.xcs & 2) == 0)
- return;
-
- clts(); /* Allow maths ops (or we recurse) */
+ /* NB. 'clts' is done for us by Xen during virtual trap. */
if (!tsk_used_math(tsk))
init_fpu(tsk);
restore_fpu(tsk);
@@ -964,17 +957,26 @@ static trap_info_t trap_table[] = {
void __init trap_init(void)
{
HYPERVISOR_set_trap_table(trap_table);
- HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);
/*
* default LDT is a single-entry callgate to lcall7 for iBCS
* and a callgate to lcall27 for Solaris/x86 binaries
*/
make_lowmem_page_readonly(&default_ldt[0]);
- xen_flush_page_update_queue();
/*
* Should be a barrier for any external CPU state.
*/
cpu_init();
}
+
+void smp_trap_init(trap_info_t *trap_ctxt)
+{
+ trap_info_t *t = trap_table;
+
+ for (t = trap_table; t->address; t++) {
+ trap_ctxt[t->vector].flags = t->flags;
+ trap_ctxt[t->vector].cs = t->cs;
+ trap_ctxt[t->vector].address = t->address;
+ }
+}
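
smp_trap_init() above exists so secondary VCPUs can reuse the trap table that trap_init() registers for the boot CPU: each populated trap_table[] entry is copied into a vector-indexed trap_info_t array supplied by the caller. A hedged sketch of the expected call site in the SMP boot path — the context-structure name here is an assumption, not taken from this diff:

    /* Hypothetical: prepare a secondary VCPU's initial context. */
    vcpu_guest_context_t ctxt;
    memset(&ctxt, 0, sizeof(ctxt));
    smp_trap_init(ctxt.trap_ctxt);   /* per-vector copy of trap_table[] */
    /* ... fill in registers and stack, then hand ctxt to Xen ... */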
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/mach-default/Makefile b/linux-2.6.11-xen-sparse/arch/xen/i386/mach-default/Makefile
new file mode 100644
index 0000000000..7d50b2926e
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mach-default/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux kernel.
+#
+
+c-obj-y := topology.o
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y)):
+ @ln -fsn $(srctree)/arch/i386/mach-default/$(notdir $@) $@
+
+obj-y += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-))
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile
index 016d205d60..50d99c2d3d 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile
@@ -6,8 +6,8 @@ XENARCH := $(subst ",,$(CONFIG_XENARCH))
CFLAGS += -Iarch/$(XENARCH)/mm
-obj-y := init.o pgtable.o fault.o ioremap.o pageattr.o hypervisor.o
-c-obj-y := extable.o mmap.o
+obj-y := init.o pgtable.o fault.o ioremap.o hypervisor.o
+c-obj-y := extable.o mmap.o pageattr.o
c-obj-$(CONFIG_DISCONTIGMEM) += discontig.o
c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/fault.c b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/fault.c
index 478c20fe8d..99e2cccc40 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/fault.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/fault.c
@@ -21,6 +21,7 @@
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
+#include <linux/percpu.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -29,7 +30,7 @@
extern void die(const char *,struct pt_regs *,long);
-pgd_t *cur_pgd; /* XXXsmp */
+DEFINE_PER_CPU(pgd_t *, cur_pgd);
/*
* Unlock any spinlocks which will prevent us from getting the
@@ -453,7 +454,8 @@ no_context:
printk(" at virtual address %08lx\n",address);
printk(KERN_ALERT " printing eip:\n");
printk("%08lx\n", regs->eip);
- page = ((unsigned long *) cur_pgd)[address >> 22];
+ page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
+ [address >> 22];
printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
machine_to_phys(page));
/*
@@ -529,7 +531,7 @@ vmalloc_fault:
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
- pgd = index + cur_pgd;
+ pgd = index + per_cpu(cur_pgd, smp_processor_id());
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
@@ -551,7 +553,6 @@ vmalloc_fault:
if (!pmd_present(*pmd_k))
goto no_context;
set_pmd(pmd, *pmd_k);
- xen_flush_page_update_queue(); /* flush PMD update */
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
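
cur_pgd becomes per-CPU above because each processor in an SMP guest may be running on a different page directory: both the oops dump and the vmalloc-fault synchronisation must consult the PGD this CPU actually has loaded. The write side lives in the context-switch code rather than in this diff; a hedged sketch of what it presumably looks like:

    /* Hypothetical maintenance site, e.g. wherever the guest loads cr3. */
    static inline void track_pgd_load(pgd_t *pgd)
    {
        per_cpu(cur_pgd, smp_processor_id()) = pgd;
        xen_pt_switch(__pa(pgd));   /* MMUEXT_NEW_BASEPTR; see hypervisor.c */
    }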
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
index 6c223c79e1..16b253866f 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
@@ -34,317 +34,150 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm-xen/hypervisor.h>
-#include <asm-xen/multicall.h>
#include <asm-xen/balloon.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include <linux/percpu.h>
+#include <asm/tlbflush.h>
+#endif
-/*
- * This suffices to protect us if we ever move to SMP domains.
- * Further, it protects us against interrupts. At the very least, this is
- * required for the network driver which flushes the update queue before
- * pushing new receive buffers.
- */
-static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
-
-/* Linux 2.6 isn't using the traditional batched interface. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define QUEUE_SIZE 2048
#define pte_offset_kernel pte_offset
-#define pmd_val_ma(v) (v).pmd;
#define pud_t pgd_t
#define pud_offset(d, va) d
#else
-#define QUEUE_SIZE 128
#define pmd_val_ma(v) (v).pud.pgd.pgd;
#endif
-static mmu_update_t update_queue[QUEUE_SIZE];
-unsigned int mmu_update_queue_idx = 0;
-#define idx mmu_update_queue_idx
-
-/*
- * MULTICALL_flush_page_update_queue:
- * This is a version of the flush which queues as part of a multicall.
- */
-void MULTICALL_flush_page_update_queue(void)
-{
- unsigned long flags;
- unsigned int _idx;
- spin_lock_irqsave(&update_lock, flags);
- if ( (_idx = idx) != 0 )
- {
- idx = 0;
- wmb(); /* Make sure index is cleared first to avoid double updates. */
- queue_multicall3(__HYPERVISOR_mmu_update,
- (unsigned long)update_queue,
- (unsigned long)_idx,
- (unsigned long)NULL);
- }
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void __flush_page_update_queue(void)
-{
- unsigned int _idx = idx;
- idx = 0;
- wmb(); /* Make sure index is cleared first to avoid double updates. */
- if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
- {
- printk(KERN_ALERT "Failed to execute MMU updates.\n");
- BUG();
- }
-}
-
-void _flush_page_update_queue(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- if ( idx != 0 ) __flush_page_update_queue();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void increment_index(void)
-{
- idx++;
- if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
-}
-
-static inline void increment_index_and_flush(void)
-{
- idx++;
- __flush_page_update_queue();
-}
-
-void queue_l1_entry_update(pte_t *ptr, unsigned long val)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = val;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_l2_entry_update(pmd_t *ptr, pmd_t val)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = pmd_val_ma(val);
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pt_switch(unsigned long ptr)
+#ifndef CONFIG_XEN_SHADOW_MODE
+void xen_l1_entry_update(pte_t *ptr, unsigned long val)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_NEW_BASEPTR;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+ u.val = val;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
-void queue_tlb_flush(void)
+void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_TLB_FLUSH;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+ u.val = pmd_val_ma(val);
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
+#endif
-void queue_invlpg(unsigned long ptr)
+void xen_machphys_update(unsigned long mfn, unsigned long pfn)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].ptr |= ptr & PAGE_MASK;
- update_queue[idx].val = MMUEXT_INVLPG;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ u.val = pfn;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
-void queue_pgd_pin(unsigned long ptr)
+void xen_pt_switch(unsigned long ptr)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ op.cmd = MMUEXT_NEW_BASEPTR;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-void queue_pgd_unpin(unsigned long ptr)
+void xen_tlb_flush(void)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-void queue_pte_pin(unsigned long ptr)
+void xen_invlpg(unsigned long ptr)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ op.cmd = MMUEXT_INVLPG_LOCAL;
+ op.linear_addr = ptr & PAGE_MASK;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-void queue_pte_unpin(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
+#ifdef CONFIG_SMP
-void queue_set_ldt(unsigned long ptr, unsigned long len)
+void xen_tlb_flush_all(void)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
- update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ op.cmd = MMUEXT_TLB_FLUSH_ALL;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-void queue_machphys_update(unsigned long mfn, unsigned long pfn)
+void xen_tlb_flush_mask(cpumask_t *mask)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- update_queue[idx].val = pfn;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ if ( cpus_empty(*mask) )
+ return;
+ op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+ op.cpuset = mask->bits;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-/* queue and flush versions of the above */
-void xen_l1_entry_update(pte_t *ptr, unsigned long val)
+void xen_invlpg_all(unsigned long ptr)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = val;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ op.cmd = MMUEXT_INVLPG_ALL;
+ op.linear_addr = ptr & PAGE_MASK;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = pmd_val_ma(val);
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ if ( cpus_empty(*mask) )
+ return;
+ op.cmd = MMUEXT_INVLPG_MULTI;
+ op.cpuset = mask->bits;
+ op.linear_addr = ptr & PAGE_MASK;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-void xen_pt_switch(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_NEW_BASEPTR;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_tlb_flush(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_TLB_FLUSH;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_invlpg(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].ptr |= ptr & PAGE_MASK;
- update_queue[idx].val = MMUEXT_INVLPG;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
+#endif /* CONFIG_SMP */
+#ifndef CONFIG_XEN_SHADOW_MODE
void xen_pgd_pin(unsigned long ptr)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ op.cmd = MMUEXT_PIN_L2_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
void xen_pgd_unpin(unsigned long ptr)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ op.cmd = MMUEXT_UNPIN_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
void xen_pte_pin(unsigned long ptr)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ op.cmd = MMUEXT_PIN_L1_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
void xen_pte_unpin(unsigned long ptr)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ op.cmd = MMUEXT_UNPIN_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
+#endif
void xen_set_ldt(unsigned long ptr, unsigned long len)
{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
- update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_machphys_update(unsigned long mfn, unsigned long pfn)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- update_queue[idx].val = pfn;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ struct mmuext_op op;
+ op.cmd = MMUEXT_SET_LDT;
+ op.linear_addr = ptr;
+ op.nr_ents = len;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
#ifdef CONFIG_XEN_PHYSDEV_ACCESS
@@ -377,13 +210,12 @@ unsigned long allocate_empty_lowmem_region(unsigned long pages)
pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
- queue_l1_entry_update(pte, 0);
+ HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
INVALID_P2M_ENTRY;
}
- /* Flush updates through and flush the TLB. */
- xen_tlb_flush();
+ flush_tlb_all();
balloon_put_pages(pfn_array, 1 << order);
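
The hypervisor.c rewrite above retires the guest-side update queue entirely: PTE and machine-to-physical writes go straight to HYPERVISOR_mmu_update(), and everything that used to be encoded as MMU_EXTENDED_COMMAND (TLB flushes, invlpg, table pins, LDT loads, base-pointer switches) becomes a typed struct mmuext_op issued through HYPERVISOR_mmuext_op(). Callers that want batching can still get it, since both hypercalls take an array and a count — a minimal sketch using only the interfaces shown above:

    /* Sketch: batch several L1 updates into a single hypercall. */
    static void set_l1_entries(pte_t **ptes, unsigned long *vals, int n)
    {
        mmu_update_t u[16];   /* caller guarantees n <= 16 */
        int i;

        for (i = 0; i < n; i++) {
            u[i].ptr = virt_to_machine(ptes[i]);
            u[i].val = vals[i];
        }
        BUG_ON(HYPERVISOR_mmu_update(u, n, NULL, DOMID_SELF) < 0);
    }

Note also the new SMP variants (MMUEXT_TLB_FLUSH_MULTI, MMUEXT_INVLPG_MULTI) carrying a cpuset, which let the cross-CPU flush paths hand the whole target set to Xen in one call.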
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c
index 698aa2b482..b571bd6aaf 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c
@@ -192,7 +192,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
}
pte_ofs = 0;
}
- flush_page_update_queue();
}
pmd_idx = 0;
}
@@ -356,12 +355,13 @@ static void __init pagetable_init (void)
*/
memcpy(pgd_base, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
make_page_readonly(pgd_base);
- queue_pgd_pin(__pa(pgd_base));
+ xen_pgd_pin(__pa(pgd_base));
load_cr3(pgd_base);
- queue_pgd_unpin(__pa(old_pgd));
+ xen_pgd_unpin(__pa(old_pgd));
make_page_writable(old_pgd);
__flush_tlb_all();
free_bootmem(__pa(old_pgd), PAGE_SIZE);
+ init_mm.context.pinned = 1;
kernel_physical_mapping_init(pgd_base);
remap_numa_kva();
@@ -563,8 +563,7 @@ void __init paging_init(void)
zone_sizes_init();
/* Switch to the real shared_info page, and clear the dummy page. */
- flush_page_update_queue();
- set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+ set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
memset(empty_zero_page, 0, sizeof(empty_zero_page));
@@ -572,10 +571,11 @@ void __init paging_init(void)
/* Setup mapping of lower 1st MB */
for (i = 0; i < NR_FIX_ISAMAPS; i++)
if (xen_start_info.flags & SIF_PRIVILEGED)
- set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
+ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
else
- set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
- virt_to_machine(empty_zero_page));
+ __set_fixmap(FIX_ISAMAP_BEGIN - i,
+ virt_to_machine(empty_zero_page),
+ PAGE_KERNEL_RO);
#endif
}
@@ -712,18 +712,9 @@ void __init mem_init(void)
kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;
-kmem_cache_t *pte_cache;
void __init pgtable_cache_init(void)
{
- pte_cache = kmem_cache_create("pte",
- PTRS_PER_PTE*sizeof(pte_t),
- PTRS_PER_PTE*sizeof(pte_t),
- 0,
- pte_ctor,
- pte_dtor);
- if (!pte_cache)
- panic("pgtable_cache_init(): Cannot create pte cache");
if (PTRS_PER_PMD > 1) {
pmd_cache = kmem_cache_create("pmd",
PTRS_PER_PMD*sizeof(pmd_t),
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/ioremap.c b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/ioremap.c
index 8a0df417ed..86a3672e33 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/ioremap.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/ioremap.c
@@ -108,7 +108,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
if(!PageReserved(page))
return NULL;
- domid = DOMID_LOCAL;
+ domid = DOMID_SELF;
}
/*
@@ -256,7 +256,7 @@ void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
*/
idx = FIX_BTMAP_BEGIN;
while (nrpages > 0) {
- set_fixmap_ma(idx, phys_addr);
+ set_fixmap(idx, phys_addr);
phys_addr += PAGE_SIZE;
--idx;
--nrpages;
@@ -393,15 +393,7 @@ int direct_remap_area_pages(struct mm_struct *mm,
int i;
unsigned long start_address;
#define MAX_DIRECTMAP_MMU_QUEUE 130
- mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
-
- v = w = &u[0];
- if (domid != DOMID_LOCAL) {
- u[0].ptr = MMU_EXTENDED_COMMAND;
- u[0].val = MMUEXT_SET_FOREIGNDOM;
- u[0].val |= (unsigned long)domid << 16;
- v = w = &u[1];
- }
+ mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u;
start_address = address;
@@ -413,11 +405,11 @@ int direct_remap_area_pages(struct mm_struct *mm,
__direct_remap_area_pages(mm,
start_address,
address-start_address,
- w);
+ u);
- if (HYPERVISOR_mmu_update(u, v - u, NULL) < 0)
+ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
return -EFAULT;
- v = w;
+ v = u;
start_address = address;
}
@@ -432,13 +424,13 @@ int direct_remap_area_pages(struct mm_struct *mm,
v++;
}
- if (v != w) {
+ if (v != u) {
/* get the ptep's filled in */
__direct_remap_area_pages(mm,
start_address,
address-start_address,
- w);
- if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0))
+ u);
+ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
return -EFAULT;
}
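
With the new four-argument hypercall, the foreign-mapping path above no longer needs a MMUEXT_SET_FOREIGNDOM sentinel at the head of the batch: the target domain rides on HYPERVISOR_mmu_update() itself, so DOMID_SELF and foreign domains share one code path and the old v/w bookkeeping collapses to a single cursor. Sketch, with the machine PTE value left abstract:

    /* One foreign mapping; 'machine_pte' stands in for the real PTE bits. */
    mmu_update_t u;
    u.ptr = virt_to_machine(ptep);
    u.val = machine_pte;
    if (HYPERVISOR_mmu_update(&u, 1, NULL, domid) < 0)
        return -EFAULT;   /* domid may name another domain */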
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pageattr.c b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pageattr.c
deleted file mode 100644
index 1b79c7e684..0000000000
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pageattr.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright 2002 Andi Kleen, SuSE Labs.
- * Thanks to Ben LaHaise for precious feedback.
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-#include <asm/tlbflush.h>
-
-static DEFINE_SPINLOCK(cpa_lock);
-static struct list_head df_list = LIST_HEAD_INIT(df_list);
-
-
-pte_t *lookup_address(unsigned long address)
-{
- pgd_t *pgd = pgd_offset_k(address);
- pud_t *pud;
- pmd_t *pmd;
- if (pgd_none(*pgd))
- return NULL;
- pud = pud_offset(pgd, address);
- if (pud_none(*pud))
- return NULL;
- pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd))
- return NULL;
- if (pmd_large(*pmd))
- return (pte_t *)pmd;
- return pte_offset_kernel(pmd, address);
-}
-
-static struct page *split_large_page(unsigned long address, pgprot_t prot)
-{
- int i;
- unsigned long addr;
- struct page *base;
- pte_t *pbase;
-
- spin_unlock_irq(&cpa_lock);
- base = alloc_pages(GFP_KERNEL, 0);
- spin_lock_irq(&cpa_lock);
- if (!base)
- return NULL;
-
- address = __pa(address);
- addr = address & LARGE_PAGE_MASK;
- pbase = (pte_t *)page_address(base);
- for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
- pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
- addr == address ? prot : PAGE_KERNEL);
- }
- return base;
-}
-
-static void flush_kernel_map(void *dummy)
-{
- /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
- if (boot_cpu_data.x86_model >= 4)
- wbinvd();
- /* Flush all to work around Errata in early athlons regarding
- * large page flushing.
- */
- __flush_tlb_all();
-}
-
-static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
-{
- struct page *page;
- unsigned long flags;
-
- set_pte_atomic(kpte, pte); /* change init_mm */
- if (PTRS_PER_PMD > 1)
- return;
-
- spin_lock_irqsave(&pgd_lock, flags);
- for (page = pgd_list; page; page = (struct page *)page->index) {
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pgd = (pgd_t *)page_address(page) + pgd_index(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- set_pte_atomic((pte_t *)pmd, pte);
- }
- spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-/*
- * No more special protections in this 2/4MB area - revert to a
- * large page again.
- */
-static inline void revert_page(struct page *kpte_page, unsigned long address)
-{
- pte_t *linear = (pte_t *)
- pmd_offset(pud_offset(pgd_offset_k(address), address), address);
- set_pmd_pte(linear, address,
- pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
- PAGE_KERNEL_LARGE));
-}
-
-static int
-__change_page_attr(struct page *page, pgprot_t prot)
-{
- pte_t *kpte;
- unsigned long address;
- struct page *kpte_page;
-
- BUG_ON(PageHighMem(page));
- address = (unsigned long)page_address(page);
-
- kpte = lookup_address(address);
- if (!kpte)
- return -EINVAL;
- kpte_page = virt_to_page(kpte);
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
- if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
- set_pte_batched(kpte, mk_pte(page, prot));
- } else {
- struct page *split = split_large_page(address, prot);
- if (!split)
- return -ENOMEM;
- set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
- kpte_page = split;
- }
- get_page(kpte_page);
- } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
- set_pte_batched(kpte, mk_pte(page, PAGE_KERNEL));
- __put_page(kpte_page);
- } else
- BUG();
-
- /*
- * If the pte was reserved, it means it was created at boot
- * time (not via split_large_page) and in turn we must not
- * replace it with a largepage.
- */
- if (!PageReserved(kpte_page)) {
- /* memleak and potential failed 2M page regeneration */
- BUG_ON(!page_count(kpte_page));
-
- if (cpu_has_pse && (page_count(kpte_page) == 1)) {
- list_add(&kpte_page->lru, &df_list);
- revert_page(kpte_page, address);
- }
- }
- return 0;
-}
-
-static inline void flush_map(void)
-{
- on_each_cpu(flush_kernel_map, NULL, 1, 1);
-}
-
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
- *
- * The caller needs to ensure that there are no conflicting mappings elsewhere.
- * This function only deals with the kernel linear map.
- *
- * Caller must call global_flush_tlb() after this.
- */
-int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
- int err = 0;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&cpa_lock, flags);
- for (i = 0; i < numpages; i++, page++) {
- err = __change_page_attr(page, prot);
- if (err)
- break;
- }
- flush_page_update_queue();
- spin_unlock_irqrestore(&cpa_lock, flags);
- return err;
-}
-
-void global_flush_tlb(void)
-{
- LIST_HEAD(l);
- struct list_head* n;
-
- BUG_ON(irqs_disabled());
-
- spin_lock_irq(&cpa_lock);
- list_splice_init(&df_list, &l);
- spin_unlock_irq(&cpa_lock);
- flush_map();
- n = l.next;
- while (n != &l) {
- struct page *pg = list_entry(n, struct page, lru);
- n = n->next;
- __free_page(pg);
- }
-}
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
- if (PageHighMem(page))
- return;
- /* the return value is ignored - the calls cannot fail,
- * large pages are disabled at boot time.
- */
- change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
- /* we should perform an IPI and flush all tlbs,
- * but that can deadlock->flush only current cpu.
- */
- __flush_tlb_all();
-}
-#endif
-
-EXPORT_SYMBOL(change_page_attr);
-EXPORT_SYMBOL(global_flush_tlb);
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c
index 336d109981..0215422a5a 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c
@@ -22,6 +22,7 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
+#include <asm/mmu_context.h>
#include <asm-xen/foreign_page.h>
@@ -176,85 +177,57 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
BUG();
return;
}
- set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-}
-
-void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
-{
- unsigned long address = __fix_to_virt(idx);
-
- if (idx >= __end_of_fixed_addresses) {
- BUG();
- return;
+ switch (idx) {
+ case FIX_WP_TEST:
+ case FIX_VSYSCALL:
+#ifdef CONFIG_X86_F00F_BUG
+ case FIX_F00F_IDT:
+#endif
+ set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
+ break;
+ default:
+ set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
+ break;
}
- set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
}
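
The merged __set_fixmap() above folds the old set_fixmap_ma() variant into a single entry point: a short whitelist of slots that hold real pseudo-physical pages (FIX_WP_TEST, FIX_VSYSCALL, and FIX_F00F_IDT where configured) keeps set_pte_pfn(), while every other fixmap is assumed to carry a machine address and goes through set_pte_pfn_ma(). That is what allows the init.c hunks earlier to call plain set_fixmap() for the shared-info page:

    /* Usage (from paging_init in init.c above): shared_info is a machine
     * address, so the default branch installs it via set_pte_pfn_ma(). */
    set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);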
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
- if (pte) {
+ if (pte)
make_page_readonly(pte);
- xen_flush_page_update_queue();
- }
return pte;
}
-void pte_ctor(void *pte, kmem_cache_t *cache, unsigned long unused)
-{
- struct page *page = virt_to_page(pte);
- SetPageForeign(page, pte_free);
- set_page_count(page, 1);
-
- clear_page(pte);
- make_page_readonly(pte);
- queue_pte_pin(__pa(pte));
- flush_page_update_queue();
-}
-
-void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
-{
- struct page *page = virt_to_page(pte);
- ClearPageForeign(page);
-
- queue_pte_unpin(__pa(pte));
- make_page_writable(pte);
- flush_page_update_queue();
-}
-
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pte_t *ptep;
-
-#ifdef CONFIG_HIGHPTE
struct page *pte;
+#ifdef CONFIG_HIGHPTE
pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
- if (pte == NULL)
- return pte;
- if (PageHighMem(pte))
- return pte;
- /* not a highmem page -- free page and grab one from the cache */
- __free_page(pte);
+#else
+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ if (pte) {
+ SetPageForeign(pte, pte_free);
+ set_page_count(pte, 1);
+ }
#endif
- ptep = kmem_cache_alloc(pte_cache, GFP_KERNEL);
- if (ptep)
- return virt_to_page(ptep);
- return NULL;
+
+ return pte;
}
void pte_free(struct page *pte)
{
+ unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
+
+ if (!pte_write(*virt_to_ptep(va)))
+ HYPERVISOR_update_va_mapping(
+ va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0);
+
+ ClearPageForeign(pte);
set_page_count(pte, 1);
-#ifdef CONFIG_HIGHPTE
- if (!PageHighMem(pte))
-#endif
- kmem_cache_free(pte_cache,
- phys_to_virt(page_to_pseudophys(pte)));
-#ifdef CONFIG_HIGHPTE
- else
- __free_page(pte);
-#endif
+
+ __free_page(pte);
}
void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
@@ -309,15 +282,11 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
if (PTRS_PER_PMD > 1)
- goto out;
+ return;
pgd_list_add(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
- out:
- make_page_readonly(pgd);
- queue_pgd_pin(__pa(pgd));
- flush_page_update_queue();
}
/* never called when PTRS_PER_PMD > 1 */
@@ -325,10 +294,6 @@ void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
unsigned long flags; /* can be called from interrupt context */
- queue_pgd_unpin(__pa(pgd));
- make_page_writable(pgd);
- flush_page_update_queue();
-
if (PTRS_PER_PMD > 1)
return;
@@ -363,6 +328,15 @@ out_oom:
void pgd_free(pgd_t *pgd)
{
int i;
+ pte_t *ptep = virt_to_ptep(pgd);
+
+ if (!pte_write(*ptep)) {
+ xen_pgd_unpin(__pa(pgd));
+ HYPERVISOR_update_va_mapping(
+ (unsigned long)pgd,
+ pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
+ 0);
+ }
/* in the PAE case user pgd entries are overwritten before usage */
if (PTRS_PER_PMD > 1)
@@ -372,22 +346,23 @@ void pgd_free(pgd_t *pgd)
kmem_cache_free(pgd_cache, pgd);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
void make_lowmem_page_readonly(void *va)
{
pte_t *pte = virt_to_ptep(va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+ set_pte(pte, pte_wrprotect(*pte));
}
void make_lowmem_page_writable(void *va)
{
pte_t *pte = virt_to_ptep(va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+ set_pte(pte, pte_mkwrite(*pte));
}
void make_page_readonly(void *va)
{
pte_t *pte = virt_to_ptep(va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+ set_pte(pte, pte_wrprotect(*pte));
if ( (unsigned long)va >= (unsigned long)high_memory )
{
unsigned long phys;
@@ -402,7 +377,7 @@ void make_page_readonly(void *va)
void make_page_writable(void *va)
{
pte_t *pte = virt_to_ptep(va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+ set_pte(pte, pte_mkwrite(*pte));
if ( (unsigned long)va >= (unsigned long)high_memory )
{
unsigned long phys;
@@ -431,3 +406,102 @@ void make_pages_writable(void *va, unsigned int nr)
va = (void *)((unsigned long)va + PAGE_SIZE);
}
}
+#endif /* CONFIG_XEN_SHADOW_MODE */
+
+static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
+{
+ struct page *page = virt_to_page(pt);
+ unsigned long pfn = page_to_pfn(page);
+
+ if (PageHighMem(page))
+ return;
+ HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ pfn_pte(pfn, flags), 0);
+}
+
+static void mm_walk(struct mm_struct *mm, pgprot_t flags)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int g,u,m;
+
+ pgd = mm->pgd;
+ for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
+ if (pgd_none(*pgd))
+ continue;
+ pud = pud_offset(pgd, 0);
+ if (PTRS_PER_PUD > 1) /* not folded */
+ mm_walk_set_prot(pud,flags);
+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
+ if (pud_none(*pud))
+ continue;
+ pmd = pmd_offset(pud, 0);
+ if (PTRS_PER_PMD > 1) /* not folded */
+ mm_walk_set_prot(pmd,flags);
+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
+ if (pmd_none(*pmd))
+ continue;
+ pte = pte_offset_kernel(pmd,0);
+ mm_walk_set_prot(pte,flags);
+ }
+ }
+ }
+}
+
+void mm_pin(struct mm_struct *mm)
+{
+ spin_lock(&mm->page_table_lock);
+
+ mm_walk(mm, PAGE_KERNEL_RO);
+ HYPERVISOR_update_va_mapping(
+ (unsigned long)mm->pgd,
+ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO), 0);
+ xen_pgd_pin(__pa(mm->pgd));
+ mm->context.pinned = 1;
+
+ spin_unlock(&mm->page_table_lock);
+}
+
+void mm_unpin(struct mm_struct *mm)
+{
+ spin_lock(&mm->page_table_lock);
+
+ xen_pgd_unpin(__pa(mm->pgd));
+ HYPERVISOR_update_va_mapping(
+ (unsigned long)mm->pgd,
+ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0);
+ mm_walk(mm, PAGE_KERNEL);
+ mm->context.pinned = 0;
+
+ spin_unlock(&mm->page_table_lock);
+}
+
+void _arch_exit_mmap(struct mm_struct *mm)
+{
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
+
+ /*
+	 * We aggressively remove defunct pgds from cr3. We execute unmap_vmas()
+	 * *much* faster this way, as avoiding TLB flushes lets writable-pagetable
+	 * (wrpt) batches grow much larger.
+ */
+ if ( tsk->active_mm == mm )
+ {
+ tsk->active_mm = &init_mm;
+ atomic_inc(&init_mm.mm_count);
+
+ switch_mm(mm, &init_mm, tsk);
+
+ atomic_dec(&mm->mm_count);
+ BUG_ON(atomic_read(&mm->mm_count) == 0);
+ }
+
+ task_unlock(tsk);
+
+ if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) )
+ mm_unpin(mm);
+}
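
mm_pin()/mm_unpin() above implement lazy pinning: mm_walk() write-protects every pagetable page reachable from the PGD (highmem pages are skipped), the PGD itself is made read-only, and a single xen_pgd_pin() asks the hypervisor to validate and pin the whole tree; unpinning reverses the sequence exactly. _arch_exit_mmap() then switches a dying mm out of cr3 and unpins it before teardown, so unmap_vmas() runs against writable tables. The pin-side trigger is not part of this diff; a hedged sketch of where it presumably hooks in:

    /* Hypothetical hook, e.g. from switch_mm(): pin on first activation. */
    static inline void xen_activate_mm(struct mm_struct *mm)
    {
        if (!mm->context.pinned)
            mm_pin(mm);
    }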
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/pci/Makefile b/linux-2.6.11-xen-sparse/arch/xen/i386/pci/Makefile
index 175f5f4819..76c024abfd 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/pci/Makefile
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/pci/Makefile
@@ -6,12 +6,13 @@ c-obj-y := i386.o
c-obj-$(CONFIG_PCI_BIOS) += pcbios.o
c-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
-obj-$(CONFIG_PCI_DIRECT) += direct.o
+c-obj-$(CONFIG_PCI_DIRECT) += direct.o
c-pci-y := fixup.o
c-pci-$(CONFIG_ACPI_PCI) += acpi.o
c-pci-y += legacy.o
-pci-y += irq.o
+# Make sure irq.o gets linked in after legacy.o
+l-pci-y += irq.o
c-pci-$(CONFIG_X86_VISWS) := visws.o fixup.o
pci-$(CONFIG_X86_VISWS) :=
@@ -26,6 +27,6 @@ c-link :=
$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
@ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
-obj-y += $(c-obj-y)
+obj-y += $(c-obj-y) $(l-pci-y)
clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/pci/direct.c b/linux-2.6.11-xen-sparse/arch/xen/i386/pci/direct.c
deleted file mode 100644
index 88c6692a9a..0000000000
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/pci/direct.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * direct.c - Low-level direct PCI config space access
- */
-
-#include <linux/pci.h>
-#include <linux/init.h>
-#include "pci.h"
-
-#include <asm-xen/xen-public/xen.h>
-#include <asm-xen/xen-public/physdev.h>
-
-/*
- * Functions for accessing PCI configuration space with type xen accesses
- */
-
-static int pci_conf_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
-{
- unsigned long flags;
- physdev_op_t op;
- int ret;
-
- if (!value || (bus > 255) || (devfn > 255) || (reg > 255))
- return -EINVAL;
-
- spin_lock_irqsave(&pci_config_lock, flags);
-
- op.cmd = PHYSDEVOP_PCI_CFGREG_READ;
- op.u.pci_cfgreg_read.bus = bus;
- op.u.pci_cfgreg_read.dev = (devfn & ~0x7) >> 3;
- op.u.pci_cfgreg_read.func = devfn & 0x7;
- op.u.pci_cfgreg_read.reg = reg;
- op.u.pci_cfgreg_read.len = len;
-
- ret = HYPERVISOR_physdev_op(&op);
- if (ret == 0)
- *value = op.u.pci_cfgreg_read.value;
-
- spin_unlock_irqrestore(&pci_config_lock, flags);
-
- return ret;
-}
-
-static int pci_conf_write (int seg, int bus, int devfn, int reg, int len, u32 value)
-{
- unsigned long flags;
- physdev_op_t op;
- int ret;
-
- if ((bus > 255) || (devfn > 255) || (reg > 255))
- return -EINVAL;
-
- spin_lock_irqsave(&pci_config_lock, flags);
-
- op.cmd = PHYSDEVOP_PCI_CFGREG_WRITE;
- op.u.pci_cfgreg_write.bus = bus;
- op.u.pci_cfgreg_write.dev = (devfn & ~0x7) >> 3;
- op.u.pci_cfgreg_write.func = devfn & 0x7;
- op.u.pci_cfgreg_write.reg = reg;
- op.u.pci_cfgreg_write.len = len;
- op.u.pci_cfgreg_write.value = value;
-
- ret = HYPERVISOR_physdev_op(&op);
-
- spin_unlock_irqrestore(&pci_config_lock, flags);
-
- return ret;
-}
-
-struct pci_raw_ops pci_direct_xen = {
- .read = pci_conf_read,
- .write = pci_conf_write,
-};
-
-static int __init pci_direct_init(void)
-{
- printk(KERN_INFO "PCI: Using configuration type Xen\n");
- raw_pci_ops = &pci_direct_xen;
- return 0;
-}
-
-arch_initcall(pci_direct_init);
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/pci/irq.c b/linux-2.6.11-xen-sparse/arch/xen/i386/pci/irq.c
index 816d439b2c..7eeea04f72 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/pci/irq.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/pci/irq.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/dmi.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/io_apic.h>
@@ -20,8 +21,15 @@
#include "pci.h"
-#include <asm-xen/xen-public/xen.h>
-#include <asm-xen/xen-public/physdev.h>
+#define DBG printk
+
+#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
+#define PIRQ_VERSION 0x0100
+
+static int broken_hp_bios_irq9;
+static int acer_tm360_irqrouting;
+
+static struct irq_routing_table *pirq_table;
static int pirq_enable_irq(struct pci_dev *dev);
@@ -37,33 +45,963 @@ static int pirq_penalty[16] = {
0, 0, 0, 0, 1000, 100000, 100000, 100000
};
+struct irq_router {
+ char *name;
+ u16 vendor, device;
+ int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
+ int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
+};
+
+struct irq_router_handler {
+ u16 vendor;
+ int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
+};
+
int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
+/*
+ * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
+ */
-static int __init pcibios_irq_init(void)
+static struct irq_routing_table * __init pirq_find_routing_table(void)
{
- int bus;
- physdev_op_t op;
+ u8 *addr;
+ struct irq_routing_table *rt;
+ int i;
+ u8 sum;
- DBG("PCI: IRQ init\n");
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
+ rt = (struct irq_routing_table *) addr;
+ if (rt->signature != PIRQ_SIGNATURE ||
+ rt->version != PIRQ_VERSION ||
+ rt->size % 16 ||
+ rt->size < sizeof(struct irq_routing_table))
+ continue;
+ sum = 0;
+ for(i=0; i<rt->size; i++)
+ sum += addr[i];
+ if (!sum) {
+ DBG("PCI: Interrupt Routing Table found at 0x%p\n", rt);
+ return rt;
+ }
+ }
+#endif
+
+ return NULL;
+}
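
pirq_find_routing_table() accepts a candidate only if the "$PIR" signature and version 0x0100 match, the size is a non-zero multiple of 16 no smaller than the header, and every byte of the table sums to zero modulo 256 — the BIOS picks the checksum byte so the whole table cancels out. The validity rule, restated on its own:

    /* All bytes of the table, checksum field included, must sum to 0 (mod 256). */
    static int pirq_table_checksum_ok(const u8 *p, unsigned int size)
    {
        u8 sum = 0;
        while (size--)
            sum += *p++;
        return sum == 0;
    }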
- if (pcibios_enable_irq || raw_pci_ops == NULL)
+/*
+ * If we have an IRQ routing table, use it to search for peer host
+ * bridges. It's a gross hack, but since there is no other known way
+ * to get a list of buses, we have to go this way.
+ */
+
+static void __init pirq_peer_trick(void)
+{
+ struct irq_routing_table *rt = pirq_table;
+ u8 busmap[256];
+ int i;
+ struct irq_info *e;
+
+ memset(busmap, 0, sizeof(busmap));
+ for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
+ e = &rt->slots[i];
+#ifdef DEBUG
+ {
+ int j;
+ DBG("%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
+ for(j=0; j<4; j++)
+ DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
+ DBG("\n");
+ }
+#endif
+ busmap[e->bus] = 1;
+ }
+ for(i = 1; i < 256; i++) {
+ if (!busmap[i] || pci_find_bus(0, i))
+ continue;
+ if (pci_scan_bus(i, &pci_root_ops, NULL))
+ printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
+ }
+ pcibios_last_bus = -1;
+}
+
+/*
+ * Code for querying and setting of IRQ routes on various interrupt routers.
+ */
+
+void eisa_set_level_irq(unsigned int irq)
+{
+ unsigned char mask = 1 << (irq & 7);
+ unsigned int port = 0x4d0 + (irq >> 3);
+ unsigned char val;
+ static u16 eisa_irq_mask;
+
+ if (irq >= 16 || (1 << irq) & eisa_irq_mask)
+ return;
+
+ eisa_irq_mask |= (1 << irq);
+ printk("PCI: setting IRQ %u as level-triggered\n", irq);
+ val = inb(port);
+ if (!(val & mask)) {
+ DBG(" -> edge");
+ outb(val | mask, port);
+ }
+}
+
+/*
+ * Common IRQ routing practice: nybbles in config space,
+ * offset by some magic constant.
+ */
+static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
+{
+ u8 x;
+ unsigned reg = offset + (nr >> 1);
+
+ pci_read_config_byte(router, reg, &x);
+ return (nr & 1) ? (x >> 4) : (x & 0xf);
+}
+
+static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
+{
+ u8 x;
+ unsigned reg = offset + (nr >> 1);
+
+ pci_read_config_byte(router, reg, &x);
+ x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
+ pci_write_config_byte(router, reg, x);
+}
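
read_config_nybble()/write_config_nybble() capture the convention shared by several routers below: one 4-bit IRQ slot per link, two links packed per config byte, even-numbered slots in the low nybble. The VIA router, for instance, keeps PIRQA-D at offset 0x55 and remaps link 4 to nybble 5 because PIRQD sits in the high half of 0x57:

    /* From pirq_via_get() below: PIRQD lives one nybble further along. */
    irq = read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);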
+
+/*
+ * ALI pirq entries are damn ugly, and completely undocumented.
+ * This has been figured out from pirq tables, and it's not a pretty
+ * picture.
+ */
+static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
+
+ return irqmap[read_config_nybble(router, 0x48, pirq-1)];
+}
+
+static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ static unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
+ unsigned int val = irqmap[irq];
+
+ if (val) {
+ write_config_nybble(router, 0x48, pirq-1, val);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
+ * just a pointer to the config space.
+ */
+static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ u8 x;
+
+ pci_read_config_byte(router, pirq, &x);
+ return (x < 16) ? x : 0;
+}
+
+static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ pci_write_config_byte(router, pirq, irq);
+ return 1;
+}
+
+/*
+ * The VIA pirq rules are nibble-based, like ALI,
+ * but without the ugly irq number munging.
+ * However, PIRQD is in the upper instead of lower 4 bits.
+ */
+static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
+}
+
+static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
+ return 1;
+}
+
+/*
+ * ITE 8330G pirq rules are nibble-based
+ * FIXME: pirqmap may be { 1, 0, 3, 2 },
+ * 2+3 are both mapped to irq 9 on my system
+ */
+static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
+ return read_config_nybble(router,0x43, pirqmap[pirq-1]);
+}
+
+static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
+ write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
+ return 1;
+}
+
+/*
+ * OPTI: high four bits are nibble pointer..
+ * I wonder what the low bits do?
+ */
+static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ return read_config_nybble(router, 0xb8, pirq >> 4);
+}
+
+static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ write_config_nybble(router, 0xb8, pirq >> 4, irq);
+ return 1;
+}
+
+/*
+ * Cyrix: nibble offset 0x5C
+ * 0x5C bits 7:4 is INTB bits 3:0 is INTA
+ * 0x5D bits 7:4 is INTD bits 3:0 is INTC
+ */
+static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ return read_config_nybble(router, 0x5C, (pirq-1)^1);
+}
+
+static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
+ return 1;
+}
+
+/*
+ * PIRQ routing for SiS 85C503 router used in several SiS chipsets.
+ * We have to deal with the following issues here:
+ * - vendors have different ideas about the meaning of link values
+ * - some onboard devices (integrated in the chipset) have special
+ * links and are thus routed differently (i.e. not via PCI INTA-INTD)
+ * - different revision of the router have a different layout for
+ * the routing registers, particularly for the onchip devices
+ *
+ * For all routing registers the common thing is we have one byte
+ * per routeable link which is defined as:
+ * bit 7 IRQ mapping enabled (0) or disabled (1)
+ * bits [6:4] reserved (sometimes used for onchip devices)
+ * bits [3:0] IRQ to map to
+ * allowed: 3-7, 9-12, 14-15
+ * reserved: 0, 1, 2, 8, 13
+ *
+ * The config-space registers located at 0x41/0x42/0x43/0x44 are
+ * always used to route the normal PCI INT A/B/C/D respectively.
+ * Apparently there are systems implementing PCI routing table using
+ * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
+ * We try our best to handle both link mappings.
+ *
+ * Currently (2003-05-21) it appears most SiS chipsets follow the
+ * definition of routing registers from the SiS-5595 southbridge.
+ * According to the SiS 5595 datasheets the revision id's of the
+ * router (ISA-bridge) should be 0x01 or 0xb0.
+ *
+ * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
+ * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
+ * They seem to work with the current routing code. However there is
+ * some concern because of the two USB-OHCI HCs (original SiS 5595
+ * had only one). YMMV.
+ *
+ * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
+ *
+ * 0x61: IDEIRQ:
+ * bits [6:5] must be written 01
+ * bit 4 channel-select primary (0), secondary (1)
+ *
+ * 0x62: USBIRQ:
+ * bit 6 OHCI function disabled (0), enabled (1)
+ *
+ * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved
+ *
+ * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved
+ *
+ * We support USBIRQ (in addition to INTA-INTD) and keep the
+ * IDE, ACPI and DAQ routing untouched as set by the BIOS.
+ *
+ * Currently the only reported exception is the new SiS 65x chipset
+ * which includes the SiS 69x southbridge. Here we have the 85C503
+ * router revision 0x04 and there are changes in the register layout
+ * mostly related to the different USB HCs with USB 2.0 support.
+ *
+ * Onchip routing for router rev-id 0x04 (trial-and-error observation)
+ *
+ * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs
+ * bit 6-4 are probably unused, not like 5595
+ */
+
+#define PIRQ_SIS_IRQ_MASK 0x0f
+#define PIRQ_SIS_IRQ_DISABLE 0x80
+#define PIRQ_SIS_USB_ENABLE 0x40
+
+static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ u8 x;
+ int reg;
+
+ reg = pirq;
+ if (reg >= 0x01 && reg <= 0x04)
+ reg += 0x40;
+ pci_read_config_byte(router, reg, &x);
+ return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK);
+}
+
+static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ u8 x;
+ int reg;
+
+ reg = pirq;
+ if (reg >= 0x01 && reg <= 0x04)
+ reg += 0x40;
+ pci_read_config_byte(router, reg, &x);
+ x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
+ x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE;
+ pci_write_config_byte(router, reg, x);
+ return 1;
+}
+
+
+/*
+ * VLSI: nibble offset 0x74 - educated guess due to routing table and
+ * config space of VLSI 82C534 PCI-bridge/router (1004:0102)
+ * Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
+ * devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
+ * for the busbridge to the docking station.
+ */
+
+static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ if (pirq > 8) {
+ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
+ return 0;
+ }
+ return read_config_nybble(router, 0x74, pirq-1);
+}
+
+static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ if (pirq > 8) {
+ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
+ return 0;
+ }
+ write_config_nybble(router, 0x74, pirq-1, irq);
+ return 1;
+}
+
+/*
+ * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
+ * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register
+ * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect
+ * register is a straight binary coding of desired PIC IRQ (low nibble).
+ *
+ * The 'link' value in the PIRQ table is already in the correct format
+ * for the Index register. There are some special index values:
+ * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
+ * and 0x03 for SMBus.
+ */
+static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ outb_p(pirq, 0xc00);
+ return inb(0xc01) & 0xf;
+}
+
+static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ outb_p(pirq, 0xc00);
+ outb_p(irq, 0xc01);
+ return 1;
+}
+
+/* Support for AMD756 PCI IRQ Routing
+ * Jhon H. Caicedo <jhcaiced@osso.org.co>
+ * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
+ * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
+ * The AMD756 pirq rules are nibble-based
+ * offset 0x56 0-3 PIRQA 4-7 PIRQB
+ * offset 0x57 0-3 PIRQC 4-7 PIRQD
+ */
+static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ u8 irq;
+ irq = 0;
+ if (pirq <= 4)
+ {
+ irq = read_config_nybble(router, 0x56, pirq - 1);
+ }
+ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
+ dev->vendor, dev->device, pirq, irq);
+ return irq;
+}
+
+static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
+ dev->vendor, dev->device, pirq, irq);
+ if (pirq <= 4)
+ {
+ write_config_nybble(router, 0x56, pirq - 1, irq);
+ }
+ return 1;
+}
+
+#ifdef CONFIG_PCI_BIOS
+
+static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ struct pci_dev *bridge;
+ int pin = pci_get_interrupt_pin(dev, &bridge);
+ return pcibios_set_irq_routing(bridge, pin, irq);
+}
+
+#endif
+
+static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
+{
+ static struct pci_device_id pirq_440gx[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
+ { },
+ };
+
+ /* 440GX has a proprietary PIRQ router -- don't use it */
+ if (pci_dev_present(pirq_440gx))
+ return 0;
+
+ switch(device)
+ {
+ case PCI_DEVICE_ID_INTEL_82371FB_0:
+ case PCI_DEVICE_ID_INTEL_82371SB_0:
+ case PCI_DEVICE_ID_INTEL_82371AB_0:
+ case PCI_DEVICE_ID_INTEL_82371MX:
+ case PCI_DEVICE_ID_INTEL_82443MX_0:
+ case PCI_DEVICE_ID_INTEL_82801AA_0:
+ case PCI_DEVICE_ID_INTEL_82801AB_0:
+ case PCI_DEVICE_ID_INTEL_82801BA_0:
+ case PCI_DEVICE_ID_INTEL_82801BA_10:
+ case PCI_DEVICE_ID_INTEL_82801CA_0:
+ case PCI_DEVICE_ID_INTEL_82801CA_12:
+ case PCI_DEVICE_ID_INTEL_82801DB_0:
+ case PCI_DEVICE_ID_INTEL_82801E_0:
+ case PCI_DEVICE_ID_INTEL_82801EB_0:
+ case PCI_DEVICE_ID_INTEL_ESB_1:
+ case PCI_DEVICE_ID_INTEL_ICH6_0:
+ case PCI_DEVICE_ID_INTEL_ICH6_1:
+ case PCI_DEVICE_ID_INTEL_ICH7_0:
+ case PCI_DEVICE_ID_INTEL_ICH7_1:
+ r->name = "PIIX/ICH";
+ r->get = pirq_piix_get;
+ r->set = pirq_piix_set;
+ return 1;
+ }
+ return 0;
+}
+
+static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
+{
+ /* FIXME: We should move some of the quirk fixup stuff here */
+ switch(device)
+ {
+ case PCI_DEVICE_ID_VIA_82C586_0:
+ case PCI_DEVICE_ID_VIA_82C596:
+ case PCI_DEVICE_ID_VIA_82C686:
+ case PCI_DEVICE_ID_VIA_8231:
+ /* FIXME: add new ones for 8233/5 */
+ r->name = "VIA";
+ r->get = pirq_via_get;
+ r->set = pirq_via_set;
+ return 1;
+ }
+ return 0;
+}
+
+static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
+{
+ switch(device)
+ {
+ case PCI_DEVICE_ID_VLSI_82C534:
+ r->name = "VLSI 82C534";
+ r->get = pirq_vlsi_get;
+ r->set = pirq_vlsi_set;
+ return 1;
+ }
+ return 0;
+}
+
+
+static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
+{
+ switch(device)
+ {
+ case PCI_DEVICE_ID_SERVERWORKS_OSB4:
+ case PCI_DEVICE_ID_SERVERWORKS_CSB5:
+ r->name = "ServerWorks";
+ r->get = pirq_serverworks_get;
+ r->set = pirq_serverworks_set;
+ return 1;
+ }
+ return 0;
+}
+
+static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
+{
+ if (device != PCI_DEVICE_ID_SI_503)
return 0;
+
+ r->name = "SIS";
+ r->get = pirq_sis_get;
+ r->set = pirq_sis_set;
+ return 1;
+}
+
+static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
+{
+ switch(device)
+ {
+ case PCI_DEVICE_ID_CYRIX_5520:
+ r->name = "NatSemi";
+ r->get = pirq_cyrix_get;
+ r->set = pirq_cyrix_set;
+ return 1;
+ }
+ return 0;
+}
+
+static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
+{
+ switch(device)
+ {
+ case PCI_DEVICE_ID_OPTI_82C700:
+ r->name = "OPTI";
+ r->get = pirq_opti_get;
+ r->set = pirq_opti_set;
+ return 1;
+ }
+ return 0;
+}
+
+static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
+{
+ switch(device)
+ {
+ case PCI_DEVICE_ID_ITE_IT8330G_0:
+ r->name = "ITE";
+ r->get = pirq_ite_get;
+ r->set = pirq_ite_set;
+ return 1;
+ }
+ return 0;
+}
+
+static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
+{
+ switch(device)
+ {
+ case PCI_DEVICE_ID_AL_M1533:
+ case PCI_DEVICE_ID_AL_M1563:
+ printk("PCI: Using ALI IRQ Router\n");
+ r->name = "ALI";
+ r->get = pirq_ali_get;
+ r->set = pirq_ali_set;
+ return 1;
+ }
+ return 0;
+}
+
+static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
+{
+ switch(device)
+ {
+ case PCI_DEVICE_ID_AMD_VIPER_740B:
+ r->name = "AMD756";
+ break;
+ case PCI_DEVICE_ID_AMD_VIPER_7413:
+ r->name = "AMD766";
+ break;
+ case PCI_DEVICE_ID_AMD_VIPER_7443:
+ r->name = "AMD768";
+ break;
+ default:
+ return 0;
+ }
+ r->get = pirq_amd756_get;
+ r->set = pirq_amd756_set;
+ return 1;
+}
+
+static __initdata struct irq_router_handler pirq_routers[] = {
+ { PCI_VENDOR_ID_INTEL, intel_router_probe },
+ { PCI_VENDOR_ID_AL, ali_router_probe },
+ { PCI_VENDOR_ID_ITE, ite_router_probe },
+ { PCI_VENDOR_ID_VIA, via_router_probe },
+ { PCI_VENDOR_ID_OPTI, opti_router_probe },
+ { PCI_VENDOR_ID_SI, sis_router_probe },
+ { PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
+ { PCI_VENDOR_ID_VLSI, vlsi_router_probe },
+ { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
+ { PCI_VENDOR_ID_AMD, amd_router_probe },
+ /* Someone with docs needs to add the ATI Radeon IGP */
+ { 0, NULL }
+};
+static struct irq_router pirq_router;
+static struct pci_dev *pirq_router_dev;
+
+
+/*
+ * FIXME: should we have an option to say "generic for
+ * chipset" ?
+ */
+
+static void __init pirq_find_router(struct irq_router *r)
+{
+ struct irq_routing_table *rt = pirq_table;
+ struct irq_router_handler *h;
+
+#ifdef CONFIG_PCI_BIOS
+ if (!rt->signature) {
+ printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
+ r->set = pirq_bios_set;
+ r->name = "BIOS";
+ return;
+ }
+#endif
+
+ /* Default unless a driver reloads it */
+ r->name = "default";
+ r->get = NULL;
+ r->set = NULL;
+
+ DBG("PCI: Attempting to find IRQ router for %04x:%04x\n",
+ rt->rtr_vendor, rt->rtr_device);
+
+ pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
+ if (!pirq_router_dev) {
+ DBG("PCI: Interrupt router not found at %02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
+ return;
+ }
+
+ for( h = pirq_routers; h->vendor; h++) {
+ /* First look for a router match */
+ if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device))
+ break;
+ /* Fall back to a device match */
+ if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device))
+ break;
+ }
+ printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
+ pirq_router.name,
+ pirq_router_dev->vendor,
+ pirq_router_dev->device,
+ pci_name(pirq_router_dev));
+}
+
+static struct irq_info *pirq_get_info(struct pci_dev *dev)
+{
+ struct irq_routing_table *rt = pirq_table;
+ int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
+ struct irq_info *info;
+
+ for (info = rt->slots; entries--; info++)
+ if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
+ return info;
+ return NULL;
+}
+
+static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
+{
+ u8 pin;
+ struct irq_info *info;
+ int i, pirq, newirq;
+ int irq = 0;
+ u32 mask;
+ struct irq_router *r = &pirq_router;
+ struct pci_dev *dev2 = NULL;
+ char *msg = NULL;
+
+ /* Find IRQ pin */
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (!pin) {
+ DBG(" -> no interrupt pin\n");
+ return 0;
+ }
+ pin = pin - 1;
- op.cmd = PHYSDEVOP_PCI_PROBE_ROOT_BUSES;
- if (HYPERVISOR_physdev_op(&op) != 0) {
- printk(KERN_WARNING "PCI: System does not support PCI\n");
+ /* Find IRQ routing entry */
+
+ if (!pirq_table)
+ return 0;
+
+ DBG("IRQ for %s[%c]", pci_name(dev), 'A' + pin);
+ info = pirq_get_info(dev);
+ if (!info) {
+ DBG(" -> not found in routing table\n");
return 0;
}
+ pirq = info->irq[pin].link;
+ mask = info->irq[pin].bitmap;
+ if (!pirq) {
+ DBG(" -> not routed\n");
+ return 0;
+ }
+ DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
+ mask &= pcibios_irq_mask;
+
+ /* Work around broken HP Pavilion Notebooks which assign USB to
+ IRQ 9 even though it is actually wired to IRQ 11 */
+
+ if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
+ dev->irq = 11;
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
+ r->set(pirq_router_dev, dev, pirq, 11);
+ }
+
+ /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
+ if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) {
+ pirq = 0x68;
+ mask = 0x400;
+ dev->irq = r->get(pirq_router_dev, dev, pirq);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ }
+
+ /*
+ * Find the best IRQ to assign: use the one
+ * reported by the device if possible.
+ */
+ newirq = dev->irq;
+ if (!((1 << newirq) & mask)) {
+ if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
+ else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev));
+ }
+ if (!newirq && assign) {
+ for (i = 0; i < 16; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+ if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, SA_SHIRQ))
+ newirq = i;
+ }
+ }
+ DBG(" -> newirq=%d", newirq);
+
+ /* Check if it is hardcoded */
+ if ((pirq & 0xf0) == 0xf0) {
+ irq = pirq & 0xf;
+ DBG(" -> hardcoded IRQ %d\n", irq);
+ msg = "Hardcoded";
+ } else if ( r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
+ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
+ DBG(" -> got IRQ %d\n", irq);
+ msg = "Found";
+ } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
+ DBG(" -> assigning IRQ %d", newirq);
+ if (r->set(pirq_router_dev, dev, pirq, newirq)) {
+ eisa_set_level_irq(newirq);
+ DBG(" ... OK\n");
+ msg = "Assigned";
+ irq = newirq;
+ }
+ }
+
+ if (!irq) {
+ DBG(" ... failed\n");
+ if (newirq && mask == (1 << newirq)) {
+ msg = "Guessed";
+ irq = newirq;
+ } else
+ return 0;
+ }
+ printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev));
+
+ /* Update IRQ for all devices with the same pirq value */
+ while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
+ pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
+ if (!pin)
+ continue;
+ pin--;
+ info = pirq_get_info(dev2);
+ if (!info)
+ continue;
+ if (info->irq[pin].link == pirq) {
+ /* We refuse to override the dev->irq information. Give a warning! */
+ if ( dev2->irq && dev2->irq != irq && \
+ (!(pci_probe & PCI_USE_PIRQ_MASK) || \
+ ((1 << dev2->irq) & mask)) ) {
+#ifndef CONFIG_PCI_MSI
+ printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
+ pci_name(dev2), dev2->irq, irq);
+#endif
+ continue;
+ }
+ dev2->irq = irq;
+ pirq_penalty[irq]++;
+ if (dev != dev2)
+ printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2));
+ }
+ }
+ return 1;
+}
+
+static void __init pcibios_fixup_irqs(void)
+{
+ struct pci_dev *dev = NULL;
+ u8 pin;
+
+ DBG("PCI: IRQ fixup\n");
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ /*
+ * If the BIOS has set an out of range IRQ number, just ignore it.
+ * Also keep track of which IRQs are already in use.
+ */
+ if (dev->irq >= 16) {
+ DBG("%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
+ dev->irq = 0;
+ }
+ /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
+ if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
+ pirq_penalty[dev->irq] = 0;
+ pirq_penalty[dev->irq]++;
+ }
+
+ dev = NULL;
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+#ifdef CONFIG_X86_IO_APIC
+ /*
+ * Recalculate IRQ numbers if we use the I/O APIC.
+ */
+ if (io_apic_assign_pci_irqs)
+ {
+ int irq;
- printk(KERN_INFO "PCI: Probing PCI hardware\n");
- for (bus = 0; bus < 256; bus++)
- if (test_bit(bus, (unsigned long *)
- &op.u.pci_probe_root_buses.busmask[0]))
- (void)pcibios_scan_root(bus);
+ if (pin) {
+ pin--; /* interrupt pins are numbered starting from 1 */
+ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
+ /*
+ * Busses behind bridges are typically not listed in the MP-table.
+ * In this case we have to look up the IRQ based on the parent bus,
+ * parent slot, and pin number. The SMP code detects such bridged
+ * busses itself so we should get into this branch reliably.
+ */
+ if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
+ struct pci_dev * bridge = dev->bus->self;
+
+ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
+ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
+ PCI_SLOT(bridge->devfn), pin);
+ if (irq >= 0)
+ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
+ pci_name(bridge), 'A' + pin, irq);
+ }
+ if (irq >= 0) {
+ if (use_pci_vector() &&
+ !platform_legacy_irq(irq))
+ irq = IO_APIC_VECTOR(irq);
+
+ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
+ pci_name(dev), 'A' + pin, irq);
+ dev->irq = irq;
+ }
+ }
+ }
+#endif
+ /*
+ * Still no IRQ? Try to lookup one...
+ */
+ if (pin && !dev->irq)
+ pcibios_lookup_irq(dev, 0);
+ }
+}
+
+/*
+ * Work around broken HP Pavilion Notebooks which assign USB to
+ * IRQ 9 even though it is actually wired to IRQ 11
+ */
+static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
+{
+ if (!broken_hp_bios_irq9) {
+ broken_hp_bios_irq9 = 1;
+ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
+ }
+ return 0;
+}
+
+/*
+ * Work around broken Acer TravelMate 360 Notebooks which assign
+ * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
+ */
+static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
+{
+ if (!acer_tm360_irqrouting) {
+ acer_tm360_irqrouting = 1;
+ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
+ }
+ return 0;
+}
+
+static struct dmi_system_id __initdata pciirq_dmi_table[] = {
+ {
+ .callback = fix_broken_hp_bios_irq9,
+ .ident = "HP Pavilion N5400 Series Laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
+ DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
+ },
+ },
+ {
+ .callback = fix_acer_tm360_irqrouting,
+ .ident = "Acer TravelMate 36x Laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
+ },
+ },
+ { }
+};
+
+static int __init pcibios_irq_init(void)
+{
+ DBG("PCI: IRQ init\n");
+
+ if (pcibios_enable_irq || raw_pci_ops == NULL)
+ return 0;
+
+ dmi_check_system(pciirq_dmi_table);
+
+ pirq_table = pirq_find_routing_table();
+
+#ifdef CONFIG_PCI_BIOS
+ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
+ pirq_table = pcibios_get_irq_routing_table();
+#endif
+ if (pirq_table) {
+ pirq_peer_trick();
+ pirq_find_router(&pirq_router);
+ if (pirq_table->exclusive_irqs) {
+ int i;
+ for (i=0; i<16; i++)
+ if (!(pirq_table->exclusive_irqs & (1 << i)))
+ pirq_penalty[i] += 100;
+ }
+ /* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
+ if (io_apic_assign_pci_irqs)
+ pirq_table = NULL;
+ }
pcibios_enable_irq = pirq_enable_irq;
+ pcibios_fixup_irqs();
return 0;
}
@@ -92,35 +1030,67 @@ void pcibios_penalize_isa_irq(int irq)
static int pirq_enable_irq(struct pci_dev *dev)
{
- int err;
u8 pin;
- physdev_op_t op;
-
- /* Inform Xen that we are going to use this device. */
- op.cmd = PHYSDEVOP_PCI_INITIALISE_DEVICE;
- op.u.pci_initialise_device.bus = dev->bus->number;
- op.u.pci_initialise_device.dev = PCI_SLOT(dev->devfn);
- op.u.pci_initialise_device.func = PCI_FUNC(dev->devfn);
- if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
- return err;
-
- /* Now we can bind to the very final IRQ line. */
- pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &pin);
- dev->irq = pin;
-
- /* Sanity-check that an interrupt-producing device is routed
- * to an IRQ. */
+ extern int via_interrupt_line_quirk;
+ struct pci_dev *temp_dev;
+
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
- if (pin != 0) {
- if (dev->irq != 0)
- printk(KERN_INFO "PCI: Obtained IRQ %d for device %s\n",
- dev->irq, dev->slot_name);
+ if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
+ char *msg;
+ msg = "";
+ if (io_apic_assign_pci_irqs) {
+ int irq;
+
+ if (pin) {
+ pin--; /* interrupt pins are numbered starting from 1 */
+ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
+ /*
+ * Busses behind bridges are typically not listed in the MP-table.
+ * In this case we have to look up the IRQ based on the parent bus,
+ * parent slot, and pin number. The SMP code detects such bridged
+ * busses itself so we should get into this branch reliably.
+ */
+ temp_dev = dev;
+ while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
+ struct pci_dev * bridge = dev->bus->self;
+
+ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
+ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
+ PCI_SLOT(bridge->devfn), pin);
+ if (irq >= 0)
+ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
+ pci_name(bridge), 'A' + pin, irq);
+ dev = bridge;
+ }
+ dev = temp_dev;
+ if (irq >= 0) {
+#ifdef CONFIG_PCI_MSI
+ if (!platform_legacy_irq(irq))
+ irq = IO_APIC_VECTOR(irq);
+#endif
+ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
+ pci_name(dev), 'A' + pin, irq);
+ dev->irq = irq;
+ return 0;
+ } else
+ msg = " Probably buggy MP table.";
+ }
+ } else if (pci_probe & PCI_BIOS_IRQ_SCAN)
+ msg = "";
else
- printk(KERN_WARNING "PCI: No IRQ known for interrupt "
- "pin %c of device %s.\n", 'A' + pin - 1,
- dev->slot_name);
+ msg = " Please try using pci=biosirq.";
+
+ /* With IDE legacy devices the IRQ lookup failure is not a problem. */
+ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5))
+ return 0;
+
+ printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
+ 'A' + pin - 1, pci_name(dev), msg);
}
-
+ /* VIA bridges use interrupt line for apic/pci steering across
+ the V-Link */
+ else if (via_interrupt_line_quirk)
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq & 15);
return 0;
}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/kernel/Makefile b/linux-2.6.11-xen-sparse/arch/xen/kernel/Makefile
index c2fad2a8dd..7f7f3b173a 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/Makefile
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/Makefile
@@ -11,4 +11,8 @@ $(obj)/vmlinux.lds.S:
extra-y += vmlinux.lds
-obj-y := ctrl_if.o evtchn.o fixup.o reboot.o xen_proc.o skbuff.o devmem.o
+obj-y := ctrl_if.o evtchn.o fixup.o reboot.o gnttab.o devmem.o
+
+obj-$(CONFIG_PROC_FS) += xen_proc.o
+obj-$(CONFIG_NET) += skbuff.o
+obj-$(CONFIG_SMP) += smp.o
diff --git a/linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c b/linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c
index 16852cb02a..c1cfb82dbd 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c
@@ -47,6 +47,19 @@
#endif
/*
+ * Extra ring macros to sync a consumer index up to the public producer index.
+ * Generally UNSAFE, but we use it for recovery and shutdown in some cases.
+ */
+#define RING_DROP_PENDING_REQUESTS(_r) \
+ do { \
+ (_r)->req_cons = (_r)->sring->req_prod; \
+ } while (0)
+#define RING_DROP_PENDING_RESPONSES(_r) \
+ do { \
+ (_r)->rsp_cons = (_r)->sring->rsp_prod; \
+ } while (0)
+
+/*
* Only used by initial domain which must create its own control-interface
* event channel. This value is picked up by the user-space domain controller
* via an ioctl.
@@ -59,8 +72,8 @@ static spinlock_t ctrl_if_lock;
static struct irqaction ctrl_if_irq_action;
-static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
-static CONTROL_RING_IDX ctrl_if_rx_req_cons;
+static ctrl_front_ring_t ctrl_if_tx_ring;
+static ctrl_back_ring_t ctrl_if_rx_ring;
/* Incoming message requests. */
/* Primary message type -> message handler. */
@@ -97,8 +110,6 @@ static void __ctrl_if_rx_tasklet(unsigned long data);
static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);
#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
-#define TX_FULL(_c) \
- (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
static void ctrl_if_notify_controller(void)
{
@@ -113,21 +124,20 @@ static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
static void __ctrl_if_tx_tasklet(unsigned long data)
{
- control_if_t *ctrl_if = get_ctrl_if();
- ctrl_msg_t *msg;
- int was_full = TX_FULL(ctrl_if);
- CONTROL_RING_IDX rp;
+ ctrl_msg_t *msg;
+ int was_full = RING_FULL(&ctrl_if_tx_ring);
+ RING_IDX i, rp;
- rp = ctrl_if->tx_resp_prod;
+ i = ctrl_if_tx_ring.rsp_cons;
+ rp = ctrl_if_tx_ring.sring->rsp_prod;
rmb(); /* Ensure we see all requests up to 'rp'. */
- while ( ctrl_if_tx_resp_cons != rp )
+ for ( ; i != rp; i++ )
{
- msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
-
- DPRINTK("Rx-Rsp %u/%u :: %d/%d\n",
- ctrl_if_tx_resp_cons,
- ctrl_if->tx_resp_prod,
+ msg = RING_GET_RESPONSE(&ctrl_if_tx_ring, i);
+
+ DPRINTK("Rx-Rsp %u/%u :: %d/%d\n", i-1,
+ ctrl_if_tx_ring.sring->rsp_prod,
msg->type, msg->subtype);
/* Execute the callback handler, if one was specified. */
@@ -138,16 +148,16 @@ static void __ctrl_if_tx_tasklet(unsigned long data)
smp_mb(); /* Execute, /then/ free. */
ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
}
-
- /*
- * Step over the message in the ring /after/ finishing reading it. As
- * soon as the index is updated then the message may get blown away.
- */
- smp_mb();
- ctrl_if_tx_resp_cons++;
}
- if ( was_full && !TX_FULL(ctrl_if) )
+ /*
+ * Step over messages in the ring /after/ finishing reading them. As soon
+ * as the index is updated then the message may get blown away.
+ */
+ smp_mb();
+ ctrl_if_tx_ring.rsp_cons = i;
+
+ if ( was_full && !RING_FULL(&ctrl_if_tx_ring) )
{
wake_up(&ctrl_if_tx_wait);
run_task_queue(&ctrl_if_tx_tq);
@@ -172,24 +182,27 @@ static void __ctrl_if_rxmsg_deferred(void *unused)
static void __ctrl_if_rx_tasklet(unsigned long data)
{
- control_if_t *ctrl_if = get_ctrl_if();
ctrl_msg_t msg, *pmsg;
- CONTROL_RING_IDX rp, dp;
+ CONTROL_RING_IDX dp;
+ RING_IDX rp, i;
+ i = ctrl_if_rx_ring.req_cons;
+ rp = ctrl_if_rx_ring.sring->req_prod;
dp = ctrl_if_rxmsg_deferred_prod;
- rp = ctrl_if->rx_req_prod;
rmb(); /* Ensure we see all requests up to 'rp'. */
-
- while ( ctrl_if_rx_req_cons != rp )
+
+ for ( ; i != rp; i++)
{
- pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
+ pmsg = RING_GET_REQUEST(&ctrl_if_rx_ring, i);
memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
- DPRINTK("Rx-Req %u/%u :: %d/%d\n",
- ctrl_if_rx_req_cons-1,
- ctrl_if->rx_req_prod,
+ DPRINTK("Rx-Req %u/%u :: %d/%d\n", i-1,
+ ctrl_if_rx_ring.sring->req_prod,
msg.type, msg.subtype);
+ if ( msg.length > sizeof(msg.msg) )
+ msg.length = sizeof(msg.msg);
+
if ( msg.length != 0 )
memcpy(msg.msg, pmsg->msg, msg.length);
@@ -201,6 +214,8 @@ static void __ctrl_if_rx_tasklet(unsigned long data)
(*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
}
+ ctrl_if_rx_ring.req_cons = i;
+
if ( dp != ctrl_if_rxmsg_deferred_prod )
{
wmb();
@@ -212,12 +227,10 @@ static void __ctrl_if_rx_tasklet(unsigned long data)
static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
struct pt_regs *regs)
{
- control_if_t *ctrl_if = get_ctrl_if();
-
- if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
+ if ( RING_HAS_UNCONSUMED_RESPONSES(&ctrl_if_tx_ring) )
tasklet_schedule(&ctrl_if_tx_tasklet);
- if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
+ if ( RING_HAS_UNCONSUMED_REQUESTS(&ctrl_if_rx_ring) )
tasklet_schedule(&ctrl_if_rx_tasklet);
return IRQ_HANDLED;
@@ -229,13 +242,13 @@ ctrl_if_send_message_noblock(
ctrl_msg_handler_t hnd,
unsigned long id)
{
- control_if_t *ctrl_if = get_ctrl_if();
unsigned long flags;
+ ctrl_msg_t *dmsg;
int i;
spin_lock_irqsave(&ctrl_if_lock, flags);
- if ( TX_FULL(ctrl_if) )
+ if ( RING_FULL(&ctrl_if_tx_ring) )
{
spin_unlock_irqrestore(&ctrl_if_lock, flags);
return -EAGAIN;
@@ -252,14 +265,15 @@ ctrl_if_send_message_noblock(
}
DPRINTK("Tx-Req %u/%u :: %d/%d\n",
- ctrl_if->tx_req_prod,
- ctrl_if_tx_resp_cons,
+ ctrl_if_tx_ring.req_prod_pvt,
+ ctrl_if_tx_ring.rsp_cons,
msg->type, msg->subtype);
- memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)],
- msg, sizeof(*msg));
- wmb(); /* Write the message before letting the controller peek at it. */
- ctrl_if->tx_req_prod++;
+ dmsg = RING_GET_REQUEST(&ctrl_if_tx_ring,
+ ctrl_if_tx_ring.req_prod_pvt);
+ memcpy(dmsg, msg, sizeof(*msg));
+ ctrl_if_tx_ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS(&ctrl_if_tx_ring);
spin_unlock_irqrestore(&ctrl_if_lock, flags);
@@ -358,10 +372,8 @@ int
ctrl_if_enqueue_space_callback(
struct tq_struct *task)
{
- control_if_t *ctrl_if = get_ctrl_if();
-
/* Fast path. */
- if ( !TX_FULL(ctrl_if) )
+ if ( !RING_FULL(&ctrl_if_tx_ring) )
return 0;
(void)queue_task(task, &ctrl_if_tx_tq);
@@ -372,14 +384,13 @@ ctrl_if_enqueue_space_callback(
* certainly return 'not full'.
*/
smp_mb();
- return TX_FULL(ctrl_if);
+ return RING_FULL(&ctrl_if_tx_ring);
}
void
ctrl_if_send_response(
ctrl_msg_t *msg)
{
- control_if_t *ctrl_if = get_ctrl_if();
unsigned long flags;
ctrl_msg_t *dmsg;
@@ -390,15 +401,16 @@ ctrl_if_send_response(
spin_lock_irqsave(&ctrl_if_lock, flags);
DPRINTK("Tx-Rsp %u :: %d/%d\n",
- ctrl_if->rx_resp_prod,
+ ctrl_if_rx_ring.rsp_prod_pvt,
msg->type, msg->subtype);
- dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
+ dmsg = RING_GET_RESPONSE(&ctrl_if_rx_ring,
+ ctrl_if_rx_ring.rsp_prod_pvt);
if ( dmsg != msg )
memcpy(dmsg, msg, sizeof(*msg));
- wmb(); /* Write the message before letting the controller peek at it. */
- ctrl_if->rx_resp_prod++;
+ ctrl_if_rx_ring.rsp_prod_pvt++;
+ RING_PUSH_RESPONSES(&ctrl_if_rx_ring);
spin_unlock_irqrestore(&ctrl_if_lock, flags);
@@ -491,8 +503,8 @@ void ctrl_if_resume(void)
}
/* Sync up with shared indexes. */
- ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
- ctrl_if_rx_req_cons = ctrl_if->rx_resp_prod;
+ FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring, CONTROL_RING_MEM);
+ BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring, CONTROL_RING_MEM);
ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
ctrl_if_irq = bind_evtchn_to_irq(ctrl_if_evtchn);
@@ -505,11 +517,15 @@ void ctrl_if_resume(void)
void __init ctrl_if_init(void)
{
- int i;
+ control_if_t *ctrl_if = get_ctrl_if();
+ int i;
for ( i = 0; i < 256; i++ )
ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
+ FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring, CONTROL_RING_MEM);
+ BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring, CONTROL_RING_MEM);
+
spin_lock_init(&ctrl_if_lock);
ctrl_if_resume();
@@ -532,12 +548,13 @@ __initcall(ctrl_if_late_setup);
int ctrl_if_transmitter_empty(void)
{
- return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
+ return (ctrl_if_tx_ring.sring->req_prod == ctrl_if_tx_ring.rsp_cons);
}
void ctrl_if_discard_responses(void)
{
- ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
+ RING_DROP_PENDING_RESPONSES(&ctrl_if_tx_ring);
}
EXPORT_SYMBOL(ctrl_if_send_message_noblock);
diff --git a/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
index 22885955c9..74e5d63a96 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
@@ -42,6 +42,7 @@
#include <asm-xen/xen-public/physdev.h>
#include <asm-xen/ctrl_if.h>
#include <asm-xen/hypervisor.h>
+#include <asm-xen/evtchn.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
EXPORT_SYMBOL(force_evtchn_callback);
@@ -59,7 +60,13 @@ static int evtchn_to_irq[NR_EVENT_CHANNELS];
static int irq_to_evtchn[NR_IRQS];
/* IRQ <-> VIRQ mapping. */
-static int virq_to_irq[NR_VIRQS];
+DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
+
+/* evtchn <-> IPI mapping. */
+#ifndef NR_IPIS
+#define NR_IPIS 1
+#endif
+DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];
@@ -74,8 +81,13 @@ extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
#else
extern asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
#endif
+#if defined (__i386__)
+#define IRQ_REG orig_eax
+#elif defined (__x86_64__)
+#define IRQ_REG orig_rax
+#endif
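+/*
+ * Fake the register frame of a hardware interrupt: stash the irq number
+ * where the arch entry stub would have written it, then hand off to the
+ * real do_IRQ().
+ */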
#define do_IRQ(irq, regs) do { \
- (regs)->orig_eax = (irq); \
+ (regs)->IRQ_REG = (irq); \
do_IRQ((regs)); \
} while (0)
#endif
@@ -95,22 +107,22 @@ void force_evtchn_callback(void)
/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
- unsigned long l1, l2;
+ u32 l1, l2;
unsigned int l1i, l2i, port;
int irq;
shared_info_t *s = HYPERVISOR_shared_info;
+ vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
- s->vcpu_data[0].evtchn_upcall_pending = 0;
-
+ vcpu_info->evtchn_upcall_pending = 0;
+
/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
- l1 = xchg(&s->evtchn_pending_sel, 0);
+ l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
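+ /*
+ * Two-level scan: each set bit in selector 'l1' names a word of
+ * s->evtchn_pending that may hold pending, unmasked event ports.
+ */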
while ( l1 != 0 )
{
l1i = __ffs(l1);
l1 &= ~(1 << l1i);
- l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
- while ( l2 != 0 )
+ while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
{
l2i = __ffs(l2);
l2 &= ~(1 << l2i);
@@ -142,10 +154,11 @@ int bind_virq_to_irq(int virq)
{
evtchn_op_t op;
int evtchn, irq;
+ int cpu = smp_processor_id();
spin_lock(&irq_mapping_update_lock);
- if ( (irq = virq_to_irq[virq]) == -1 )
+ if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
{
op.cmd = EVTCHNOP_bind_virq;
op.u.bind_virq.virq = virq;
@@ -157,7 +170,7 @@ int bind_virq_to_irq(int virq)
evtchn_to_irq[evtchn] = irq;
irq_to_evtchn[irq] = evtchn;
- virq_to_irq[virq] = irq;
+ per_cpu(virq_to_irq, cpu)[virq] = irq;
}
irq_bindcount[irq]++;
@@ -170,7 +183,8 @@ int bind_virq_to_irq(int virq)
void unbind_virq_from_irq(int virq)
{
evtchn_op_t op;
- int irq = virq_to_irq[virq];
+ int cpu = smp_processor_id();
+ int irq = per_cpu(virq_to_irq, cpu)[virq];
int evtchn = irq_to_evtchn[irq];
spin_lock(&irq_mapping_update_lock);
@@ -185,7 +199,61 @@ void unbind_virq_from_irq(int virq)
evtchn_to_irq[evtchn] = -1;
irq_to_evtchn[irq] = -1;
- virq_to_irq[virq] = -1;
+ per_cpu(virq_to_irq, cpu)[virq] = -1;
+ }
+
+ spin_unlock(&irq_mapping_update_lock);
+}
+
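+/*
+ * Bind a per-cpu IPI to an irq. The backing event channel is allocated on
+ * first use and cached in ipi_to_evtchn[]; later binds for the same
+ * (cpu, ipi) pair just take another reference on the existing irq.
+ */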
+int bind_ipi_on_cpu_to_irq(int cpu, int ipi)
+{
+ evtchn_op_t op;
+ int evtchn, irq;
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
+ {
+ op.cmd = EVTCHNOP_bind_ipi;
+ op.u.bind_ipi.ipi_edom = cpu;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
+ evtchn = op.u.bind_ipi.port;
+
+ irq = find_unbound_irq();
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
+
+ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+ } else
+ irq = evtchn_to_irq[evtchn];
+
+ irq_bindcount[irq]++;
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
+}
+
+void unbind_ipi_on_cpu_from_irq(int cpu, int ipi)
+{
+ evtchn_op_t op;
+ int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
+ int irq = evtchn_to_irq[evtchn];
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if ( --irq_bindcount[irq] == 0 )
+ {
+ op.cmd = EVTCHNOP_close;
+ op.u.close.dom = DOMID_SELF;
+ op.u.close.port = evtchn;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
+
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
}
spin_unlock(&irq_mapping_update_lock);
@@ -415,30 +483,15 @@ static struct hw_interrupt_type pirq_type = {
NULL
};
-static irqreturn_t misdirect_interrupt(int irq, void *dev_id,
- struct pt_regs *regs)
-{
- /* nothing */
- return IRQ_HANDLED;
-}
-
-static struct irqaction misdirect_action = {
- misdirect_interrupt,
- SA_INTERRUPT,
- CPU_MASK_NONE,
- "misdirect",
- NULL,
- NULL
-};
-
void irq_suspend(void)
{
int pirq, virq, irq, evtchn;
+ int cpu = smp_processor_id(); /* XXX */
/* Unbind VIRQs from event channels. */
for ( virq = 0; virq < NR_VIRQS; virq++ )
{
- if ( (irq = virq_to_irq[virq]) == -1 )
+ if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
continue;
evtchn = irq_to_evtchn[irq];
@@ -458,13 +511,14 @@ void irq_resume(void)
{
evtchn_op_t op;
int virq, irq, evtchn;
+ int cpu = smp_processor_id(); /* XXX */
for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
for ( virq = 0; virq < NR_VIRQS; virq++ )
{
- if ( (irq = virq_to_irq[virq]) == -1 )
+ if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
continue;
/* Get a new binding from Xen. */
@@ -486,14 +540,17 @@ void irq_resume(void)
void __init init_IRQ(void)
{
int i;
+ int cpu;
irq_ctx_init(0);
spin_lock_init(&irq_mapping_update_lock);
- /* No VIRQ -> IRQ mappings. */
- for ( i = 0; i < NR_VIRQS; i++ )
- virq_to_irq[i] = -1;
+ for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
+ /* No VIRQ -> IRQ mappings. */
+ for ( i = 0; i < NR_VIRQS; i++ )
+ per_cpu(virq_to_irq, cpu)[i] = -1;
+ }
/* No event-channel -> IRQ mappings. */
for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
@@ -528,8 +585,6 @@ void __init init_IRQ(void)
irq_desc[pirq_to_irq(i)].handler = &pirq_type;
}
- (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
-
/* This needs to be done early, but after the IRQ subsystem is alive. */
ctrl_if_init();
}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c b/linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c
index a9d9e511c2..1c01b91787 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c
@@ -51,7 +51,7 @@ __LINKAGE void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
if ( !test_and_set_bit(0, &printed) )
{
HYPERVISOR_vm_assist(VMASST_CMD_disable,
- VMASST_TYPE_4gb_segments_notify);
+ VMASST_TYPE_4gb_segments_notify);
DP("");
DP("***************************************************************");
diff --git a/linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c b/linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c
new file mode 100644
index 0000000000..69293cdb73
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c
@@ -0,0 +1,387 @@
+/******************************************************************************
+ * gnttab.c
+ *
+ * Two sets of functionality:
+ * 1. Granting foreign access to our memory reservation.
+ * 2. Accessing others' memory reservations via grant references.
+ * (i.e., mechanisms for both sender and recipient of grant references)
+ *
+ * Copyright (c) 2005, Christopher Clark
+ * Copyright (c) 2004, K A Fraser
+ */
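+
+/*
+ * Illustrative sender-side sketch (a hypothetical caller, included only to
+ * clarify the interfaces exported below): grant a frame to a peer domain,
+ * hand the reference to the peer out of band, then revoke access once the
+ * peer is finished with it.
+ *
+ *     int ref = gnttab_grant_foreign_access(peer_domid, frame, 0);
+ *     if ( ref < 0 )
+ *         return ref;     (-ENOSPC: no free grant entries)
+ *     ... pass 'ref' to the peer and wait for it to finish ...
+ *     gnttab_end_foreign_access(ref, 0);
+ */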
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+#include <asm/uaccess.h>
+#include <asm-xen/xen_proc.h>
+#include <asm-xen/linux-public/privcmd.h>
+#include <asm-xen/gnttab.h>
+
+#if 1
+#define ASSERT(_p) \
+ if ( !(_p) ) { printk(KERN_ALERT"Assertion '%s': line %d, file %s\n", \
+ #_p , __LINE__, __FILE__); *(int*)0=0; }
+#else
+#define ASSERT(_p) ((void)0)
+#endif
+
+#define WPRINTK(fmt, args...) \
+ printk(KERN_WARNING "xen_grant: " fmt, ##args)
+
+
+EXPORT_SYMBOL(gnttab_grant_foreign_access);
+EXPORT_SYMBOL(gnttab_end_foreign_access);
+EXPORT_SYMBOL(gnttab_query_foreign_access);
+EXPORT_SYMBOL(gnttab_grant_foreign_transfer);
+EXPORT_SYMBOL(gnttab_end_foreign_transfer);
+EXPORT_SYMBOL(gnttab_alloc_grant_references);
+EXPORT_SYMBOL(gnttab_free_grant_references);
+EXPORT_SYMBOL(gnttab_claim_grant_reference);
+EXPORT_SYMBOL(gnttab_release_grant_reference);
+EXPORT_SYMBOL(gnttab_grant_foreign_access_ref);
+EXPORT_SYMBOL(gnttab_grant_foreign_transfer_ref);
+
+static grant_ref_t gnttab_free_list[NR_GRANT_ENTRIES];
+static grant_ref_t gnttab_free_head;
+
+static grant_entry_t *shared;
+
+/*
+ * Lock-free grant-entry allocator
+ */
+
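+/*
+ * The free list is threaded through gnttab_free_list[]: each element holds
+ * the index of the next free entry, with gnttab_free_head naming the first.
+ * get_free_entry() pops the head with a cmpxchg retry loop; put_free_entry()
+ * pushes the same way, and its wmb() publishes the new link before the head
+ * is swung to the freed entry.
+ */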
+static inline int
+get_free_entry(
+ void)
+{
+ grant_ref_t fh, nfh = gnttab_free_head;
+ do { if ( unlikely((fh = nfh) == NR_GRANT_ENTRIES) ) return -1; }
+ while ( unlikely((nfh = cmpxchg(&gnttab_free_head, fh,
+ gnttab_free_list[fh])) != fh) );
+ return fh;
+}
+
+static inline void
+put_free_entry(
+ grant_ref_t ref)
+{
+ grant_ref_t fh, nfh = gnttab_free_head;
+ do { gnttab_free_list[ref] = fh = nfh; wmb(); }
+ while ( unlikely((nfh = cmpxchg(&gnttab_free_head, fh, ref)) != fh) );
+}
+
+/*
+ * Public grant-issuing interface functions
+ */
+
+int
+gnttab_grant_foreign_access(
+ domid_t domid, unsigned long frame, int readonly)
+{
+ int ref;
+
+ if ( unlikely((ref = get_free_entry()) == -1) )
+ return -ENOSPC;
+
+ shared[ref].frame = frame;
+ shared[ref].domid = domid;
+ wmb();
+ shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
+
+ return ref;
+}
+
+void
+gnttab_grant_foreign_access_ref(
+ grant_ref_t ref, domid_t domid, unsigned long frame, int readonly)
+{
+ shared[ref].frame = frame;
+ shared[ref].domid = domid;
+ wmb();
+ shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
+}
+
+
+int
+gnttab_query_foreign_access( grant_ref_t ref )
+{
+ u16 nflags;
+
+ nflags = shared[ref].flags;
+
+ return ( nflags & (GTF_reading|GTF_writing) );
+}
+
+void
+gnttab_end_foreign_access( grant_ref_t ref, int readonly )
+{
+ u16 flags, nflags;
+
+ nflags = shared[ref].flags;
+ do {
+ if ( (flags = nflags) & (GTF_reading|GTF_writing) )
+ printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+ }
+ while ( (nflags = cmpxchg(&shared[ref].flags, flags, 0)) != flags );
+
+ put_free_entry(ref);
+}
+
+int
+gnttab_grant_foreign_transfer(
+ domid_t domid, unsigned long pfn )
+{
+ int ref;
+
+ if ( unlikely((ref = get_free_entry()) == -1) )
+ return -ENOSPC;
+
+ shared[ref].frame = pfn;
+ shared[ref].domid = domid;
+ wmb();
+ shared[ref].flags = GTF_accept_transfer;
+
+ return ref;
+}
+
+void
+gnttab_grant_foreign_transfer_ref(
+ grant_ref_t ref, domid_t domid, unsigned long pfn )
+{
+ shared[ref].frame = pfn;
+ shared[ref].domid = domid;
+ wmb();
+ shared[ref].flags = GTF_accept_transfer;
+}
+
+unsigned long
+gnttab_end_foreign_transfer(
+ grant_ref_t ref)
+{
+ unsigned long frame = 0;
+ u16 flags;
+
+ flags = shared[ref].flags;
+ ASSERT(flags == (GTF_accept_transfer | GTF_transfer_committed));
+
+ /*
+ * If a transfer is committed then wait for the frame address to appear.
+ * Otherwise invalidate the grant entry against future use.
+ */
+ if ( likely(flags != GTF_accept_transfer) ||
+ (cmpxchg(&shared[ref].flags, flags, 0) != GTF_accept_transfer) )
+ while ( unlikely((frame = shared[ref].frame) == 0) )
+ cpu_relax();
+
+ put_free_entry(ref);
+
+ return frame;
+}
+
+void
+gnttab_free_grant_references( u16 count, grant_ref_t head )
+{
+ /* TODO: O(N)...? */
+ grant_ref_t to_die = 0, next = head;
+ int i;
+
+ for ( i = 0; i < count; i++ )
+ {
+ to_die = next;
+ next = gnttab_free_list[next];
+ put_free_entry( to_die );
+ }
+}
+
+int
+gnttab_alloc_grant_references( u16 count,
+ grant_ref_t *head,
+ grant_ref_t *terminal )
+{
+ int i;
+ grant_ref_t h = gnttab_free_head;
+
+ for ( i = 0; i < count; i++ )
+ if ( unlikely(get_free_entry() == -1) )
+ goto not_enough_refs;
+
+ *head = h;
+ *terminal = gnttab_free_head;
+
+ return 0;
+
+not_enough_refs:
+ gnttab_free_head = h;
+ return -ENOSPC;
+}
+
+int
+gnttab_claim_grant_reference( grant_ref_t *private_head,
+ grant_ref_t terminal )
+{
+ grant_ref_t g;
+ if ( unlikely((g = *private_head) == terminal) )
+ return -ENOSPC;
+ *private_head = gnttab_free_list[g];
+ return g;
+}
+
+void
+gnttab_release_grant_reference( grant_ref_t *private_head,
+ grant_ref_t release )
+{
+ gnttab_free_list[release] = *private_head;
+ *private_head = release;
+}
+
+/*
+ * ProcFS operations
+ */
+
+#ifdef CONFIG_PROC_FS
+
+static struct proc_dir_entry *grant_pde;
+
+static int grant_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long data)
+{
+ int ret;
+ privcmd_hypercall_t hypercall;
+
+ /* XXX Need safety checks here if using for anything other
+ * than debugging */
+ return -ENOSYS;
+
+ if ( cmd != IOCTL_PRIVCMD_HYPERCALL )
+ return -ENOSYS;
+
+ if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
+ return -EFAULT;
+
+ if ( hypercall.op != __HYPERVISOR_grant_table_op )
+ return -ENOSYS;
+
+ /* hypercall-invoking asm taken from privcmd.c */
+ __asm__ __volatile__ (
+ "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
+ "movl 4(%%eax),%%ebx ;"
+ "movl 8(%%eax),%%ecx ;"
+ "movl 12(%%eax),%%edx ;"
+ "movl 16(%%eax),%%esi ;"
+ "movl 20(%%eax),%%edi ;"
+ "movl (%%eax),%%eax ;"
+ TRAP_INSTR "; "
+ "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
+ : "=a" (ret) : "0" (&hypercall) : "memory" );
+
+ return ret;
+}
+
+static struct file_operations grant_file_ops = {
+ ioctl: grant_ioctl,
+};
+
+static int grant_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ unsigned int i;
+ grant_entry_t *gt;
+
+ gt = (grant_entry_t *)shared;
+ len = 0;
+
+ for ( i = 0; i < NR_GRANT_ENTRIES; i++ )
+ {
+ /* TODO: safety catch here until this can handle >PAGE_SIZE output */
+ if (len > (PAGE_SIZE - 200))
+ {
+ len += sprintf( page + len, "Truncated.\n");
+ break;
+ }
+
+ if ( gt[i].flags )
+ len += sprintf( page + len,
+ "Grant: ref (0x%x) flags (0x%hx) dom (0x%hx) frame (0x%x)\n",
+ i,
+ gt[i].flags,
+ gt[i].domid,
+ gt[i].frame );
+ }
+
+ *eof = 1;
+ return len;
+}
+
+static int grant_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ /* TODO: implement this */
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_PROC_FS */
+
+int gnttab_resume(void)
+{
+ gnttab_setup_table_t setup;
+ unsigned long frames[NR_GRANT_FRAMES];
+ int i;
+
+ setup.dom = DOMID_SELF;
+ setup.nr_frames = NR_GRANT_FRAMES;
+ setup.frame_list = frames;
+
+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0);
+ BUG_ON(setup.status != 0);
+
+ for ( i = 0; i < NR_GRANT_FRAMES; i++ )
+ set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
+
+ return 0;
+}
+
+int gnttab_suspend(void)
+{
+ int i;
+
+ for ( i = 0; i < NR_GRANT_FRAMES; i++ )
+ clear_fixmap(FIX_GNTTAB_END - i);
+
+ return 0;
+}
+
+static int __init gnttab_init(void)
+{
+ int i;
+
+ BUG_ON(gnttab_resume());
+
+ shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
+
+ for ( i = 0; i < NR_GRANT_ENTRIES; i++ )
+ gnttab_free_list[i] = i + 1;
+
+#ifdef CONFIG_PROC_FS
+ /*
+ * /proc/xen/grant : used by libxc to access grant tables
+ */
+ if ( (grant_pde = create_xen_proc_entry("grant", 0600)) == NULL )
+ {
+ WPRINTK("Unable to create grant xen proc entry\n");
+ return -1;
+ }
+
+ grant_file_ops.read = grant_pde->proc_fops->read;
+ grant_file_ops.write = grant_pde->proc_fops->write;
+
+ grant_pde->proc_fops = &grant_file_ops;
+
+ grant_pde->read_proc = &grant_read;
+ grant_pde->write_proc = &grant_write;
+#endif
+
+ printk("Grant table initialized\n");
+ return 0;
+}
+
+__initcall(gnttab_init);
diff --git a/linux-2.6.11-xen-sparse/arch/xen/kernel/reboot.c b/linux-2.6.11-xen-sparse/arch/xen/kernel/reboot.c
index 9dce9e2abc..4e6dcf649d 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/reboot.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/reboot.c
@@ -76,6 +76,20 @@ static void __do_suspend(void)
#define netif_resume() do{}while(0)
#endif
+#ifdef CONFIG_XEN_USB_FRONTEND
+ extern void usbif_resume();
+#else
+#define usbif_resume() do{}while(0)
+#endif
+
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ extern int gnttab_suspend(void);
+ extern int gnttab_resume(void);
+#else
+#define gnttab_suspend() do{}while(0)
+#define gnttab_resume() do{}while(0)
+#endif
+
extern void time_suspend(void);
extern void time_resume(void);
extern unsigned long max_pfn;
@@ -99,31 +113,22 @@ static void __do_suspend(void)
irq_suspend();
+ gnttab_suspend();
+
HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
clear_fixmap(FIX_SHARED_INFO);
memcpy(&suspend_record->resume_info, &xen_start_info,
- sizeof(xen_start_info));
+ sizeof(xen_start_info));
HYPERVISOR_suspend(virt_to_machine(suspend_record) >> PAGE_SHIFT);
- HYPERVISOR_vm_assist(VMASST_CMD_enable,
- VMASST_TYPE_4gb_segments);
-#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
- HYPERVISOR_vm_assist(VMASST_CMD_enable,
- VMASST_TYPE_writable_pagetables);
-#endif
-
shutting_down = -1;
memcpy(&xen_start_info, &suspend_record->resume_info,
- sizeof(xen_start_info));
+ sizeof(xen_start_info));
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
-#else
set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
-#endif
HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
@@ -137,6 +142,7 @@ static void __do_suspend(void)
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+ gnttab_resume();
irq_resume();
@@ -148,6 +154,8 @@ static void __do_suspend(void)
netif_resume();
+ usbif_resume();
+
__sti();
out:
diff --git a/linux-2.6.11-xen-sparse/arch/xen/kernel/smp.c b/linux-2.6.11-xen-sparse/arch/xen/kernel/smp.c
new file mode 100644
index 0000000000..fb2a6eb6fa
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/smp.c
@@ -0,0 +1,16 @@
+/* Copyright (C) 2004, Christian Limpach */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/threads.h>
+
+/*
+ * the frequency of the profiling timer can be changed
+ * by writing a multiplier value into /proc/profile.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ printk("setup_profiling_timer\n");
+
+ return 0;
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/Kconfig b/linux-2.6.11-xen-sparse/arch/xen/x86_64/Kconfig
new file mode 100644
index 0000000000..b2876cad4a
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/Kconfig
@@ -0,0 +1,456 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+# Note: ISA is disabled and will hopefully never be enabled.
+# If you managed to buy an ISA x86-64 box you'll have to fix all the
+# ISA drivers you need yourself.
+#
+
+menu "X86_64 processor configuration"
+
+config XENARCH
+ string
+ default x86_64
+
+config X86_64
+ bool
+ default y
+ help
+ Port to the x86-64 architecture. x86-64 is a 64-bit extension to the
+ classical 32-bit x86 architecture. For details see
+ <http://www.x86-64.org/>.
+
+config X86
+ bool
+ default y
+
+config 64BIT
+ def_bool y
+
+config MMU
+ bool
+ default y
+
+config ISA
+ bool
+
+config SBUS
+ bool
+
+config RWSEM_GENERIC_SPINLOCK
+ bool
+ default y
+
+config RWSEM_XCHGADD_ALGORITHM
+ bool
+
+config GENERIC_CALIBRATE_DELAY
+ bool
+ default y
+
+config X86_CMPXCHG
+ bool
+ default y
+
+config EARLY_PRINTK
+ bool "Early Printk"
+ default n
+ help
+ Write kernel log output directly into the VGA buffer or to a serial
+ port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly and doesn't cooperate
+ with klogd/syslogd or the X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+config HPET_TIMER
+ bool
+ default n
+ help
+ Use the IA-PC HPET (High Precision Event Timer) to manage
+ time in preference to the PIT and RTC, if a HPET is
+ present. The HPET provides a stable time base on SMP
+ systems, unlike the RTC, but it is more expensive to access,
+ as it is off-chip. You can find the HPET spec at
+ <http://www.intel.com/labs/platcomp/hpet/hpetspec.htm>.
+
+ If unsure, say Y.
+
+config HPET_EMULATE_RTC
+ bool "Provide RTC interrupt"
+ depends on HPET_TIMER && RTC=y
+
+config GENERIC_ISA_DMA
+ bool
+ default y
+
+config GENERIC_IOMAP
+ bool
+ default y
+
+#source "init/Kconfig"
+
+
+menu "Processor type and features"
+
+choice
+ prompt "Processor family"
+ default MK8
+
+#config MK8
+# bool "AMD-Opteron/Athlon64"
+# help
+# Optimize for AMD Opteron/Athlon64/Hammer/K8 CPUs.
+
+config MPSC
+ bool "Intel x86-64"
+ help
+ Optimize for Intel IA32 with 64bit extension CPUs
+ (Prescott/Nocona/Potomac)
+
+config GENERIC_CPU
+ bool "Generic-x86-64"
+ help
+ Generic x86-64 CPU.
+
+endchoice
+
+#
+# Define implied options from the CPU selection here
+#
+config X86_L1_CACHE_BYTES
+ int
+ default "128" if GENERIC_CPU || MPSC
+ default "64" if MK8
+
+config X86_L1_CACHE_SHIFT
+ int
+ default "7" if GENERIC_CPU || MPSC
+ default "6" if MK8
+
+config X86_TSC
+ bool
+ default n
+
+config X86_GOOD_APIC
+ bool
+ default y
+
+config MICROCODE
+ tristate "/dev/cpu/microcode - Intel CPU microcode support"
+ ---help---
+ If you say Y here, you will be able to update the microcode on
+ Intel processors. You will
+ obviously need the actual microcode binary data itself which is
+ not shipped with the Linux kernel.
+
+ For latest news and information on obtaining all the required
+ ingredients for this driver, check:
+ <http://www.urbanmyth.org/microcode/>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called microcode.
+ If you use modprobe or kmod you may also want to add the line
+ 'alias char-major-10-184 microcode' to your /etc/modules.conf file.
+
+config X86_MSR
+ tristate "/dev/cpu/*/msr - Model-specific register support"
+ help
+ This device gives privileged processes access to the x86
+ Model-Specific Registers (MSRs). It is a character device with
+ major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
+ MSR accesses are directed to a specific CPU on multi-processor
+ systems.
+
+config X86_CPUID
+ tristate "/dev/cpu/*/cpuid - CPU information support"
+ help
+ This device gives processes access to the x86 CPUID instruction to
+ be executed on a specific processor. It is a character device
+ with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+ /dev/cpu/31/cpuid.
+
+# disable it for opteron optimized builds because it pulls in ACPI_BOOT
+config X86_HT
+ bool
+ depends on SMP && !MK8
+ default y
+
+config MATH_EMULATION
+ bool
+
+config MCA
+ bool
+
+config EISA
+ bool
+
+#config X86_IO_APIC
+# bool
+# default n
+
+#config X86_LOCAL_APIC
+# bool
+# default n
+
+config MTRR
+ bool "MTRR (Memory Type Range Register) support"
+ ---help---
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+ processor access to memory ranges. This is most useful if you have
+ a video (VGA) card on a PCI or AGP bus. Enabling write-combining
+ allows bus write transfers to be combined into a larger transfer
+ before bursting over the PCI/AGP bus. This can increase performance
+ of image write operations 2.5 times or more. Saying Y here creates a
+ /proc/mtrr file which may be used to manipulate your processor's
+ MTRRs. Typically the X server should use this.
+
+ This code has a reasonably generic interface so that similar
+ control registers on other processors can be easily supported
+ as well.
+
+ Saying Y here also fixes a problem with buggy SMP BIOSes which only
+ set the MTRRs for the boot CPU and not for the secondary CPUs. This
+ can lead to all sorts of problems, so it's good to say Y here.
+
+ Just say Y here, all x86-64 machines support MTRRs.
+
+ See <file:Documentation/mtrr.txt> for more information.
+
+config SMP
+ bool "Symmetric multi-processing support"
+ ---help---
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
+
+ If you say N here, the kernel will run on single and multiprocessor
+ machines, but will use only one CPU of a multiprocessor machine. If
+ you say Y here, the kernel will run on many, but not all,
+ singleprocessor machines. On a singleprocessor machine, the kernel
+ will run faster if you say N here.
+
+ If you don't know what to do here, say N.
+
+config PREEMPT
+ bool "Preemptible Kernel"
+ ---help---
+ This option reduces the latency of the kernel when reacting to
+ real-time or interactive events by allowing a low priority process to
+ be preempted even if it is in kernel mode executing a system call.
+ This allows applications to run more reliably even when the system is
+ under load. On the contrary, it may also break your drivers and add
+ priority inheritance problems to your system. Don't select it if
+ you rely on a stable system or have slightly obscure hardware.
+ It's also not very well tested on x86-64 currently.
+ You have been warned.
+
+ Say Y here if you are feeling brave and building a kernel for a
+ desktop, embedded or real-time system. Say N if you are unsure.
+
+config SCHED_SMT
+ bool "SMT (Hyperthreading) scheduler support"
+ depends on SMP
+ default n
+ help
+ SMT scheduler support improves the CPU scheduler's decision making
+ when dealing with Intel Pentium 4 chips with HyperThreading at a
+ cost of slightly increased overhead in some places. If unsure say
+ N here.
+
+config K8_NUMA
+ bool "K8 NUMA support"
+ select NUMA
+ depends on SMP
+ help
+ Enable NUMA (Non-Uniform Memory Access) support for
+ AMD Opteron Multiprocessor systems. The kernel will try to allocate
+ memory used by a CPU on the local memory controller of the CPU
+ and add some more NUMA awareness to the kernel.
+ This code is recommended on all multiprocessor Opteron systems
+ and normally doesn't hurt on others.
+
+config NUMA_EMU
+ bool "NUMA emulation support"
+ select NUMA
+ depends on SMP
+ help
+ Enable NUMA emulation. A flat machine will be split
+ into virtual nodes when booted with "numa=fake=N", where N is the
+ number of nodes. This is only useful for debugging.
+
+config DISCONTIGMEM
+ bool
+ depends on NUMA
+ default y
+
+config NUMA
+ bool
+ default n
+
+config HAVE_DEC_LOCK
+ bool
+ depends on SMP
+ default y
+
+# actually 64 maximum, but you need to fix the APIC code first
+# to use clustered mode or whatever your big iron needs
+config NR_CPUS
+ int "Maximum number of CPUs (2-8)"
+ range 2 8
+ depends on SMP
+ default "8"
+ help
+ This allows you to specify the maximum number of CPUs which this
+ kernel will support. The maximum supported value is 8 and the
+ minimum value which makes sense is 2.
+
+ This is purely to save memory - each supported CPU requires
+ memory in the static kernel configuration.
+
+config GART_IOMMU
+ bool "IOMMU support"
+ depends on PCI
+ help
+ Support the K8 IOMMU. Needed to run systems with more than 4GB of memory
+ properly with 32-bit PCI devices that do not support DAC (Double Address
+ Cycle). The IOMMU can be turned off at runtime with the iommu=off parameter.
+ Normally the kernel will take the right choice by itself.
+ If unsure, say Y.
+
+# need this always enabled with GART_IOMMU for the VIA workaround
+config SWIOTLB
+ bool
+ depends on GART_IOMMU
+ default y
+
+config DUMMY_IOMMU
+ bool
+ depends on !GART_IOMMU && !SWIOTLB
+ default y
+ help
+ Don't use IOMMU code. This will cause problems when you have more than 4GB
+ of memory and any 32-bit devices. Don't turn on unless you know what you
+ are doing.
+
+config X86_MCE
+ bool "Machine check support" if EMBEDDED
+ default n
+ help
+ Include a machine check error handler to report hardware errors.
+ This version will require the mcelog utility to decode some
+ machine check error logs. See
+ ftp://ftp.x86-64.org/pub/linux/tools/mcelog
+
+endmenu
+
+#
+# Use the generic interrupt handling code in kernel/irq/:
+#
+config GENERIC_HARDIRQS
+ bool
+ default y
+
+config GENERIC_IRQ_PROBE
+ bool
+ default y
+
+menu "Power management options"
+
+source kernel/power/Kconfig
+
+source "arch/x86_64/kernel/cpufreq/Kconfig"
+
+endmenu
+
+menu "Bus options (PCI etc.)"
+
+config PCI
+ bool "PCI support"
+
+# x86-64 doesn't support PCI BIOS access from long mode so always go direct.
+config PCI_DIRECT
+ bool
+ depends on PCI
+ default y
+
+config PCI_MMCONFIG
+ bool "Support mmconfig PCI config space access"
+ depends on PCI
+ select ACPI_BOOT
+
+config UNORDERED_IO
+ bool "Unordered IO mapping access"
+ depends on EXPERIMENTAL
+ help
+ Use unordered stores to access IO memory mappings in device drivers.
+ Still very experimental. When a driver works on IA64/ppc64/pa-risc it should
+ work with this option, but it makes the drivers behave differently
+ from i386. Requires that the driver writer used memory barriers
+ properly.
+
+#source "drivers/pci/Kconfig"
+
+#source "drivers/pcmcia/Kconfig"
+
+#source "drivers/pci/hotplug/Kconfig"
+
+endmenu
+
+
+menu "Executable file formats / Emulations"
+
+# source "fs/Kconfig.binfmt"
+
+config IA32_EMULATION
+ bool "IA32 Emulation"
+ help
+ Include code to run 32-bit programs under a 64-bit kernel. You should likely
+ turn this on, unless you're 100% sure that you don't have any 32-bit programs
+ left.
+
+config IA32_AOUT
+ bool "IA32 a.out support"
+ depends on IA32_EMULATION
+ help
+ Support old a.out binaries in the 32bit emulation.
+
+config COMPAT
+ bool
+ depends on IA32_EMULATION
+ default y
+
+config SYSVIPC_COMPAT
+ bool
+ depends on COMPAT && SYSVIPC
+ default y
+
+config UID16
+ bool
+ depends on IA32_EMULATION
+ default y
+
+endmenu
+
+# source drivers/Kconfig
+
+# source "drivers/firmware/Kconfig"
+
+# source fs/Kconfig
+
+#source "arch/x86_64/oprofile/Kconfig"
+
+#source "arch/x86_64/Kconfig.debug"
+
+# source "security/Kconfig"
+
+# source "crypto/Kconfig"
+
+# source "lib/Kconfig"
+
+endmenu
+
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/Makefile b/linux-2.6.11-xen-sparse/arch/xen/x86_64/Makefile
new file mode 100644
index 0000000000..9f506b3203
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/Makefile
@@ -0,0 +1,92 @@
+#
+# x86_64/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+# 19990713 Artur Skawina <skawina@geocities.com>
+# Added '-march' and '-mpreferred-stack-boundary' support
+# 20000913 Pavel Machek <pavel@suse.cz>
+# Converted for x86_64 architecture
+# 20010105 Andi Kleen, add IA32 compiler.
+# ....and later removed it again....
+# 20050205 Jun Nakajima <jun.nakajima@intel.com>
+# Modified for Xen
+#
+# $Id: Makefile,v 1.31 2002/03/22 15:56:07 ak Exp $
+
+#
+# early bootup linking needs 32bit. You can either use real 32bit tools
+# here or 64bit tools in 32bit mode.
+#
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+IA32_CC := $(CC) $(CPPFLAGS) -m32 -O2 -fomit-frame-pointer
+IA32_LD := $(LD) -m elf_i386
+IA32_AS := $(CC) $(AFLAGS) -m32 -Wa,--32 -traditional -c
+IA32_OBJCOPY := $(CROSS_COMPILE)objcopy
+IA32_CPP := $(CROSS_COMPILE)gcc -m32 -E
+export IA32_CC IA32_LD IA32_AS IA32_OBJCOPY IA32_CPP
+
+
+LDFLAGS := -m elf_x86_64
+#LDFLAGS_vmlinux := -e stext
+
+CHECKFLAGS += -D__x86_64__ -m64
+
+cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
+cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+CFLAGS += $(cflags-y)
+
+CFLAGS += -mno-red-zone
+CFLAGS += -mcmodel=kernel
+CFLAGS += -pipe
+# this makes reading assembly source easier, but produces worse code
+# actually it makes the kernel smaller too.
+CFLAGS += -fno-reorder-blocks
+CFLAGS += -Wno-sign-compare
+ifneq ($(CONFIG_DEBUG_INFO),y)
+CFLAGS += -fno-asynchronous-unwind-tables
+# -fweb shrinks the kernel a bit, but the difference is very small
+# it also messes up debugging, so don't use it for now.
+#CFLAGS += $(call cc-option,-fweb)
+endif
+# -funit-at-a-time shrinks the kernel .text considerably
+# unfortunately it makes reading oopses harder.
+CFLAGS += $(call cc-option,-funit-at-a-time,)
+
+head-y := arch/xen/x86_64/kernel/head.o arch/xen/x86_64/kernel/head64.o arch/xen/x86_64/kernel/init_task.o
+
+libs-y += arch/x86_64/lib/
+core-y += arch/xen/x86_64/kernel/ arch/xen/x86_64/mm/
+core-$(CONFIG_IA32_EMULATION) += arch/xen/x86_64/ia32/
+drivers-$(CONFIG_PCI) += arch/xen/x86_64/pci/
+drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/
+
+# for clean
+obj- += kernel/ mm/ pci/
+
+xenflags-y += -Iinclude/asm-xen/asm-x86_64/mach-xen
+
+CFLAGS += $(xenflags-y)
+AFLAGS += $(xenflags-y)
+
+prepare: include/asm-$(XENARCH)/asm_offset.h
+CLEAN_FILES += include/asm-$(XENARCH)/asm_offset.h
+
+arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
+ include/linux/version.h include/config/MARKER
+
+
+include/asm-$(XENARCH)/asm_offset.h: arch/xen/x86_64/kernel/asm-offsets.s
+ $(call filechk,gen-asm-offsets)
+ ln -fsn asm_offset.h include/asm-$(XENARCH)/offset.h
+
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/Makefile b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/Makefile
new file mode 100644
index 0000000000..7f5df30bc1
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/Makefile
@@ -0,0 +1,66 @@
+#
+# Makefile for the linux kernel.
+#
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/kernel
+
+extra-y := head.o head64.o init_task.o
+
+obj-y := process.o signal.o entry.o traps.o \
+ ioport.o ldt.o setup.o \
+ x8664_ksyms.o vsyscall.o \
+ setup64.o e820.o irq.o early_printk.o
+c-obj-y := semaphore.o i387.o sys_x86_64.o \
+ ptrace.o quirks.o syscall.o
+
+i386-obj-y := time.o
+obj-y += ../../i386/kernel/timers/
+
+s-obj-y :=
+
+#obj-$(CONFIG_X86_MCE) += mce.o
+#obj-$(CONFIG_MTRR) += ../../i386/kernel/cpu/mtrr/
+#obj-$(CONFIG_ACPI_BOOT) += acpi/
+obj-$(CONFIG_X86_MSR) += msr.o
+obj-$(CONFIG_MICROCODE) += microcode.o
+obj-$(CONFIG_X86_CPUID) += cpuid.o
+#obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o
+#obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
+#obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o \
+# genapic.o genapic_cluster.o genapic_flat.o
+#obj-$(CONFIG_PM) += suspend.o
+#obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
+#obj-$(CONFIG_CPU_FREQ) += cpufreq/
+#obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+#obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o
+c-obj-$(CONFIG_DUMMY_IOMMU) += pci-nommu.o pci-dma.o
+#obj-$(CONFIG_SWIOTLB) += swiotlb.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+
+c-obj-$(CONFIG_MODULES) += module.o
+
+#obj-y += topology.o
+c-obj-y += intel_cacheinfo.o
+
+bootflag-y += ../../../i386/kernel/bootflag.o
+cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../../i386/kernel/cpuid.o
+topology-y += ../../../i386/mach-default/topology.o
+swiotlb-$(CONFIG_SWIOTLB) += ../../../ia64/lib/swiotlb.o
+microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../../i386/kernel/microcode.o
+intel_cacheinfo-y += ../../../i386/kernel/cpu/intel_cacheinfo.o
+quirks-y += ../../../i386/kernel/quirks.o
+
+c-link := init_task.o
+s-link := vsyscall.o
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
+ ln -fsn $(srctree)/arch/x86_64/kernel/$(notdir $@) $@
+
+$(patsubst %.o,$(obj)/%.c,$(i386-obj-y)):
+ ln -fsn $(srctree)/arch/xen/i386/kernel/$(notdir $@) $@
+
+obj-y += $(c-obj-y) $(s-obj-y) $(i386-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link) $(i386-obj-y))
+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/asm-offsets.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/asm-offsets.c
new file mode 100644
index 0000000000..b965d6d1eb
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/asm-offsets.c
@@ -0,0 +1,70 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data (a mechanism sketch follows this file's diff).
+ */
+
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/suspend.h>
+#include <asm/pda.h>
+#include <asm/processor.h>
+#include <asm/segment.h>
+#include <asm/thread_info.h>
+#include <asm/ia32.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
+ ENTRY(state);
+ ENTRY(flags);
+ ENTRY(thread);
+ ENTRY(pid);
+ BLANK();
+#undef ENTRY
+#define ENTRY(entry) DEFINE(threadinfo_ ## entry, offsetof(struct thread_info, entry))
+ ENTRY(flags);
+ ENTRY(addr_limit);
+ ENTRY(preempt_count);
+ BLANK();
+#undef ENTRY
+#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
+ ENTRY(kernelstack);
+ ENTRY(oldrsp);
+ ENTRY(pcurrent);
+ ENTRY(irqrsp);
+ ENTRY(irqcount);
+ ENTRY(cpunumber);
+ ENTRY(irqstackptr);
+ ENTRY(kernel_mode);
+ BLANK();
+#undef ENTRY
+#ifdef CONFIG_IA32_EMULATION
+#define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry))
+ ENTRY(eax);
+ ENTRY(ebx);
+ ENTRY(ecx);
+ ENTRY(edx);
+ ENTRY(esi);
+ ENTRY(edi);
+ ENTRY(ebp);
+ ENTRY(esp);
+ ENTRY(eip);
+ BLANK();
+#undef ENTRY
+ DEFINE(IA32_RT_SIGFRAME_sigcontext,
+ offsetof (struct rt_sigframe32, uc.uc_mcontext));
+ BLANK();
+#endif
+ DEFINE(SIZEOF_PBE, sizeof(struct pbe));
+ DEFINE(pbe_address, offsetof(struct pbe, address));
+ DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
+ return 0;
+}
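The DEFINE() macro above works because the "i" (immediate) asm constraint makes the compiler print the computed constant directly into its assembly output; the build then scrapes those "->" lines into a header (include/asm-$(XENARCH)/asm_offset.h, per the arch Makefile earlier). A standalone sketch with a toy struct, not kernel code:

#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct demo { long a; long b; };

int main(void)
{
	/* emits a marker line into the compiler's .s output */
	DEFINE(demo_b, offsetof(struct demo, b));
	return 0;
}

Compiling this with "gcc -S" leaves a line like "->demo_b $8 offsetof(struct demo, b)" in the .s file; the kbuild gen-asm-offsets filechk step rewrites each such "->NAME $N ..." line into "#define NAME N", which is how assembly sources such as entry.S get structure offsets.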
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/e820.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/e820.c
new file mode 100644
index 0000000000..7c56a9f53c
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/e820.c
@@ -0,0 +1,533 @@
+/*
+ * Handle the memory map.
+ * The functions here do the job until bootmem takes over.
+ * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <asm/e820.h>
+#include <asm/proto.h>
+#include <asm/bootsetup.h>
+
+/*
+ * PFN of last memory page.
+ */
+unsigned long end_pfn;
+unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
+unsigned long end_pfn_map;
+
+/*
+ * Add a memory region to the kernel e820 map.
+ */
+void __init add_memory_region(unsigned long start, unsigned long size, int type)
+{
+ int x = e820.nr_map;
+
+ if (x == E820MAX) {
+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ return;
+ }
+
+ e820.map[x].addr = start;
+ e820.map[x].size = size;
+ e820.map[x].type = type;
+ e820.nr_map++;
+}
+
+#if 0
+extern char _end[];
+
+/*
+ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
+ * The direct mapping extends to end_pfn_map, so that we can directly access
+ * apertures, ACPI and other tables without having to play with fixmaps.
+ */
+
+/*
+ * Last pfn which the user wants to use.
+ */
+
+extern struct resource code_resource, data_resource;
+
+/* Check for some hardcoded bad areas that early boot is not allowed to touch */
+static inline int bad_addr(unsigned long *addrp, unsigned long size)
+{
+ unsigned long addr = *addrp, last = addr + size;
+
+ /* various gunk below that needed for SMP startup */
+ if (addr < 0x8000) {
+ *addrp = 0x8000;
+ return 1;
+ }
+
+ /* direct mapping tables of the kernel */
+ if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
+ *addrp = table_end << PAGE_SHIFT;
+ return 1;
+ }
+
+ /* initrd */
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
+ addr < INITRD_START+INITRD_SIZE) {
+ *addrp = INITRD_START + INITRD_SIZE;
+ return 1;
+ }
+#endif
+ /* kernel code + 640k memory hole (later should not be needed, but
+ be paranoid for now) */
+ if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
+ *addrp = __pa_symbol(&_end);
+ return 1;
+ }
+ /* XXX ramdisk image here? */
+ return 0;
+}
+
+int __init e820_mapped(unsigned long start, unsigned long end, unsigned type)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ if (type && ei->type != type)
+ continue;
+ if (ei->addr >= end || ei->addr + ei->size < start)
+ continue;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Find a free area in a specific range.
+ */
+unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long addr = ei->addr, last;
+ if (ei->type != E820_RAM)
+ continue;
+ if (addr < start)
+ addr = start;
+ if (addr > ei->addr + ei->size)
+ continue;
+ while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size)
+ ;
+ last = addr + size;
+ if (last > ei->addr + ei->size)
+ continue;
+ if (last > end)
+ continue;
+ return addr;
+ }
+ return -1UL;
+}
+
+/*
+ * Free bootmem based on the e820 table for a node.
+ */
+void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long last, addr;
+
+ if (ei->type != E820_RAM ||
+ ei->addr+ei->size <= start ||
+ ei->addr > end)
+ continue;
+
+ addr = round_up(ei->addr, PAGE_SIZE);
+ if (addr < start)
+ addr = start;
+
+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (last >= end)
+ last = end;
+
+ if (last > addr && last-addr >= PAGE_SIZE)
+ free_bootmem_node(pgdat, addr, last-addr);
+ }
+}
+
+/*
+ * Find the highest page frame number we have available
+ */
+unsigned long __init e820_end_of_ram(void)
+{
+ int i;
+ unsigned long end_pfn = 0;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = round_up(ei->addr, PAGE_SIZE);
+ end = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (start >= end)
+ continue;
+ if (ei->type == E820_RAM) {
+ if (end > end_pfn<<PAGE_SHIFT)
+ end_pfn = end>>PAGE_SHIFT;
+ } else {
+ if (end > end_pfn_map<<PAGE_SHIFT)
+ end_pfn_map = end>>PAGE_SHIFT;
+ }
+ }
+
+ if (end_pfn > end_pfn_map)
+ end_pfn_map = end_pfn;
+ if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
+ end_pfn_map = MAXMEM>>PAGE_SHIFT;
+ if (end_pfn > end_user_pfn)
+ end_pfn = end_user_pfn;
+ if (end_pfn > end_pfn_map)
+ end_pfn = end_pfn_map;
+
+ return end_pfn;
+}
+
+/*
+ * Mark e820 reserved areas as busy for the resource manager.
+ */
+void __init e820_reserve_resources(void)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct resource *res;
+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+ continue;
+ res = alloc_bootmem_low(sizeof(struct resource));
+ switch (e820.map[i].type) {
+ case E820_RAM: res->name = "System RAM"; break;
+ case E820_ACPI: res->name = "ACPI Tables"; break;
+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
+ default: res->name = "reserved";
+ }
+ res->start = e820.map[i].addr;
+ res->end = res->start + e820.map[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ request_resource(&iomem_resource, res);
+ if (e820.map[i].type == E820_RAM) {
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ }
+ }
+}
+
+
+void __init e820_print_map(char *who)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ printk(" %s: %016Lx - %016Lx ", who,
+ (unsigned long long) e820.map[i].addr,
+ (unsigned long long) (e820.map[i].addr + e820.map[i].size));
+ switch (e820.map[i].type) {
+ case E820_RAM: printk("(usable)\n");
+ break;
+ case E820_RESERVED:
+ printk("(reserved)\n");
+ break;
+ case E820_ACPI:
+ printk("(ACPI data)\n");
+ break;
+ case E820_NVS:
+ printk("(ACPI NVS)\n");
+ break;
+ default: printk("type %u\n", e820.map[i].type);
+ break;
+ }
+ }
+}
+
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries. The following
+ * replaces the original e820 map with a new one, removing overlaps
+ * (a standalone sketch of the sweep follows this file's diff).
+ */
+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+{
+ struct change_member {
+ struct e820entry *pbios; /* pointer to original bios entry */
+ unsigned long long addr; /* address for this change point */
+ };
+ static struct change_member change_point_list[2*E820MAX] __initdata;
+ static struct change_member *change_point[2*E820MAX] __initdata;
+ static struct e820entry *overlap_list[E820MAX] __initdata;
+ static struct e820entry new_bios[E820MAX] __initdata;
+ struct change_member *change_tmp;
+ unsigned long current_type, last_type;
+ unsigned long long last_addr;
+ int chgidx, still_changing;
+ int overlap_entries;
+ int new_bios_entry;
+ int old_nr, new_nr;
+ int i;
+
+ /*
+ Visually we're performing the following (1,2,3,4 = memory types)...
+
+ Sample memory map (w/overlaps):
+ ____22__________________
+ ______________________4_
+ ____1111________________
+ _44_____________________
+ 11111111________________
+ ____________________33__
+ ___________44___________
+ __________33333_________
+ ______________22________
+ ___________________2222_
+ _________111111111______
+ _____________________11_
+ _________________4______
+
+ Sanitized equivalent (no overlap):
+ 1_______________________
+ _44_____________________
+ ___1____________________
+ ____22__________________
+ ______11________________
+ _________1______________
+ __________3_____________
+ ___________44___________
+ _____________33_________
+ _______________2________
+ ________________1_______
+ _________________4______
+ ___________________2____
+ ____________________33__
+ ______________________4_
+ */
+
+ /* if there's only one memory region, don't bother */
+ if (*pnr_map < 2)
+ return -1;
+
+ old_nr = *pnr_map;
+
+ /* bail out if we find any unreasonable addresses in bios map */
+ for (i=0; i<old_nr; i++)
+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+ return -1;
+
+ /* create pointers for initial change-point information (for sorting) */
+ for (i=0; i < 2*old_nr; i++)
+ change_point[i] = &change_point_list[i];
+
+ /* record all known change-points (starting and ending addresses) */
+ chgidx = 0;
+ for (i=0; i < old_nr; i++) {
+ change_point[chgidx]->addr = biosmap[i].addr;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ }
+
+ /* sort change-point list by memory addresses (low -> high) */
+ still_changing = 1;
+ while (still_changing) {
+ still_changing = 0;
+ for (i=1; i < 2*old_nr; i++) {
+ /* if <current_addr> > <last_addr>, swap */
+ /* or, if current=<start_addr> & last=<end_addr>, swap */
+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
+ ((change_point[i]->addr == change_point[i-1]->addr) &&
+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+ )
+ {
+ change_tmp = change_point[i];
+ change_point[i] = change_point[i-1];
+ change_point[i-1] = change_tmp;
+ still_changing=1;
+ }
+ }
+ }
+
+ /* create a new bios memory map, removing overlaps */
+ overlap_entries=0; /* number of entries in the overlap table */
+ new_bios_entry=0; /* index for creating new bios map entries */
+ last_type = 0; /* start with undefined memory type */
+ last_addr = 0; /* start with 0 as last starting address */
+ /* loop through change-points, determining the effect on the new bios map */
+ for (chgidx=0; chgidx < 2*old_nr; chgidx++)
+ {
+ /* keep track of all overlapping bios entries */
+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+ {
+ /* add map entry to overlap list (> 1 entry implies an overlap) */
+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+ }
+ else
+ {
+ /* remove entry from list (order independent, so swap with last) */
+ for (i=0; i<overlap_entries; i++)
+ {
+ if (overlap_list[i] == change_point[chgidx]->pbios)
+ overlap_list[i] = overlap_list[overlap_entries-1];
+ }
+ overlap_entries--;
+ }
+ /* if there are overlapping entries, decide which "type" to use */
+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+ current_type = 0;
+ for (i=0; i<overlap_entries; i++)
+ if (overlap_list[i]->type > current_type)
+ current_type = overlap_list[i]->type;
+ /* continue building up new bios map based on this information */
+ if (current_type != last_type) {
+ if (last_type != 0) {
+ new_bios[new_bios_entry].size =
+ change_point[chgidx]->addr - last_addr;
+ /* move forward only if the new size was non-zero */
+ if (new_bios[new_bios_entry].size != 0)
+ if (++new_bios_entry >= E820MAX)
+ break; /* no more space left for new bios entries */
+ }
+ if (current_type != 0) {
+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+ new_bios[new_bios_entry].type = current_type;
+ last_addr=change_point[chgidx]->addr;
+ }
+ last_type = current_type;
+ }
+ }
+ new_nr = new_bios_entry; /* retain count for new bios entries */
+
+ /* copy new bios mapping into original location */
+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+ *pnr_map = new_nr;
+
+ return 0;
+}
+
+/*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it.
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory. If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect and most every PC known to man has two memory
+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
+ * thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+{
+ /* Only one memory region (or negative)? Ignore it */
+ if (nr_map < 2)
+ return -1;
+
+ do {
+ unsigned long start = biosmap->addr;
+ unsigned long size = biosmap->size;
+ unsigned long end = start + size;
+ unsigned long type = biosmap->type;
+
+ /* Overflow in 64 bits? Ignore the memory map. */
+ if (start > end)
+ return -1;
+
+ /*
+ * Some BIOSes claim RAM in the 640k - 1M region.
+ * Not right. Fix it up.
+ *
+ * This should be removed on Hammer which is supposed to not
+ * have non e820 covered ISA mappings there, but I had some strange
+ * problems so it stays for now. -AK
+ */
+ if (type == E820_RAM) {
+ if (start < 0x100000ULL && end > 0xA0000ULL) {
+ if (start < 0xA0000ULL)
+ add_memory_region(start, 0xA0000ULL-start, type);
+ if (end <= 0x100000ULL)
+ continue;
+ start = 0x100000ULL;
+ size = end - start;
+ }
+ }
+
+ add_memory_region(start, size, type);
+ } while (biosmap++,--nr_map);
+ return 0;
+}
+
+void __init setup_memory_region(void)
+{
+ char *who = "BIOS-e820";
+
+ /*
+ * Try to copy the BIOS-supplied E820-map.
+ *
+ * Otherwise fake a memory map; one section from 0k->640k,
+ * the next section from 1mb->appropriate_mem_k
+ */
+ sanitize_e820_map(E820_MAP, &E820_MAP_NR);
+ if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
+ unsigned long mem_size;
+
+ /* compare results from other methods and take the greater */
+ if (ALT_MEM_K < EXT_MEM_K) {
+ mem_size = EXT_MEM_K;
+ who = "BIOS-88";
+ } else {
+ mem_size = ALT_MEM_K;
+ who = "BIOS-e801";
+ }
+
+ e820.nr_map = 0;
+ add_memory_region(0, LOWMEMSIZE(), E820_RAM);
+ add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
+ }
+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+ e820_print_map(who);
+}
+#endif
+
+extern unsigned long xen_override_max_pfn;
+
+void __init parse_memopt(char *p, char **from)
+{
+ /*
+ * mem=XXX[kKmM] limits kernel memory to XXX+1MB
+ *
+ * It would be more logical to count from 0 instead of from
+ * HIGH_MEMORY, but we keep that for now for i386 compatibility.
+ *
+ * No support for custom mapping like i386. The reason is
+ * that we need to read the e820 map anyways to handle the
+ * ACPI mappings in the direct map. Also on x86-64 there
+ * should be always a good e820 map. This is only an upper
+ * limit, you cannot force usage of memory not in e820.
+ *
+ * -AK
+ */
+ end_user_pfn = memparse(p, from) + HIGH_MEMORY;
+ end_user_pfn >>= PAGE_SHIFT;
+ xen_override_max_pfn = (unsigned long) end_user_pfn;
+}
+
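sanitize_e820_map() above is a change-point sweep: collect every range start and end, walk them in address order, and let the highest-numbered type win wherever ranges overlap. A standalone userspace sketch of the same idea, with toy ranges instead of BIOS data (unlike the kernel version, it does not bother merging adjacent same-type spans):

#include <stdio.h>

struct range { unsigned long start, end; int type; };

#define NR 3
static struct range in[NR] = {
	{ 0,  10, 1 },	/* usable */
	{ 5,  15, 2 },	/* reserved, overlaps the first */
	{ 20, 30, 1 },
};

int main(void)
{
	unsigned long pts[2 * NR];
	int n = 0, i, j;

	/* collect every start/end change point; a crude O(n^2) sort is
	   fine for an E820MAX-bounded input */
	for (i = 0; i < NR; i++) {
		pts[n++] = in[i].start;
		pts[n++] = in[i].end;
	}
	for (i = 0; i < n; i++)
		for (j = i + 1; j < n; j++)
			if (pts[j] < pts[i]) {
				unsigned long t = pts[i];
				pts[i] = pts[j];
				pts[j] = t;
			}

	/* sweep: between consecutive points, the highest active type wins */
	for (i = 0; i + 1 < n; i++) {
		int type = 0;

		if (pts[i] == pts[i + 1])
			continue;
		for (j = 0; j < NR; j++)
			if (in[j].start <= pts[i] && pts[i + 1] <= in[j].end &&
			    in[j].type > type)
				type = in[j].type;
		if (type)
			printf("[%lu,%lu) type %d\n",
			       pts[i], pts[i + 1], type);
	}
	return 0;
}

On the toy input this prints [0,5) as type 1, [5,15) as type 2 (reserved wins over usable on the overlap), and [20,30) as type 1, which is exactly the flattening the kernel routine performs on the BIOS map.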
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S
new file mode 100644
index 0000000000..dbb4f58821
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S
@@ -0,0 +1,1181 @@
+/*
+ * linux/arch/x86_64/entry.S
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+ *
+ * $Id$
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Asit Mallick <asit.k.mallick@intel.com>
+ * Modified for Xen
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after an interrupt and after each system call.
+ *
+ * Normal syscalls and interrupts don't save a full stack frame; this is
+ * only done for syscall tracing, signals, or fork/exec et al.
+ *
+ * A note on terminology:
+ * - top of stack: Architecture defined interrupt frame from SS to RIP
+ * at the top of the kernel process stack.
+ * - partial stack frame: partially saved registers up to R11.
+ * - full stack frame: Like partial stack frame, but all register saved.
+ *
+ * TODO:
+ * - schedule it carefully for the final hardware.
+ */
+
+#define ASSEMBLY 1
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/smp.h>
+#include <asm/cache.h>
+#include <asm/errno.h>
+#include <asm/dwarf2.h>
+#include <asm/calling.h>
+#include <asm/asm_offset.h>
+#include <asm/msr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/hw_irq.h>
+#include <asm/errno.h>
+#include <asm-xen/xen-public/arch-x86_64.h>
+
+
+EVENT_MASK = (CS+4)
+VGCF_IN_SYSCALL = (1<<8)
+
+/*
+ * Copied from arch/xen/i386/kernel/entry.S
+ */
+/* Offsets into shared_info_t. */
+#define evtchn_upcall_pending 0
+#define evtchn_upcall_mask 1
+
+#define sizeof_vcpu_shift 3
+
+#ifdef CONFIG_SMP
+#define XEN_GET_VCPU_INFO(reg)
+#define preempt_disable(reg) incl TI_preempt_count(reg)
+#define preempt_enable(reg) decl TI_preempt_count(reg)
+#define XEN_LOCK_VCPU_INFO_SMP(reg) preempt_disable(%rbp) ; \
+ movl TI_cpu(%rbp),reg ; \
+ shl $sizeof_vcpu_shift,reg ; \
+ addl HYPERVISOR_shared_info,reg
+#define XEN_UNLOCK_VCPU_INFO_SMP(reg) preempt_enable(%rbp)
+#define XEN_UNLOCK_VCPU_INFO_SMP_fixup .byte 0xff,0xff,0xff
+#define Ux00 0xff
+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_BLOCK_EVENTS(reg) XEN_LOCK_VCPU_INFO_SMP(reg) ; \
+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
+ XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_UNBLOCK_EVENTS(reg) XEN_LOCK_VCPU_INFO_SMP(reg) ; \
+ movb $0,evtchn_upcall_mask(reg) ; \
+ XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) GET_THREAD_INFO(%ebp) ; \
+ XEN_LOCK_VCPU_INFO_SMP(reg) ; \
+ movb evtchn_upcall_mask(reg), tmp ; \
+ movb tmp, off(%rsp) ; \
+ XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#else
+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
+#define XEN_LOCK_VCPU_INFO_SMP(reg) movq HYPERVISOR_shared_info,reg
+#define XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_UNLOCK_VCPU_INFO_SMP_fixup
+#define Ux00 0x00
+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_BLOCK_EVENTS(reg) XEN_LOCKED_BLOCK_EVENTS(reg)
+#define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
+ movb evtchn_upcall_mask(reg), tmp; \
+ movb tmp, off(%rsp)
+#endif
+
+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
+
+ .code64
+
+#ifdef CONFIG_PREEMPT
+#define preempt_stop XEN_BLOCK_EVENTS(%rsi)
+#else
+#define preempt_stop
+#define retint_kernel retint_restore_args
+#endif
+
+
+/*
+ * C code is not supposed to know about undefined top of stack. Every time
+ * a C function with a pt_regs argument is called from the SYSCALL-based
+ * fast path, FIXUP_TOP_OF_STACK is needed.
+ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
+ * manipulation.
+ */
+
+
+ .macro FAKE_STACK_FRAME child_rip
+ /* push in order ss, rsp, eflags, cs, rip */
+ xorq %rax, %rax
+ pushq %rax /* ss */
+ CFI_ADJUST_CFA_OFFSET 8
+ pushq %rax /* rsp */
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_OFFSET rip,0
+ pushq $(1<<9) /* eflags - interrupts on */
+ CFI_ADJUST_CFA_OFFSET 8
+ pushq $__KERNEL_CS /* cs */
+ CFI_ADJUST_CFA_OFFSET 8
+ pushq \child_rip /* rip */
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_OFFSET rip,0
+ pushq %rax /* orig rax */
+ CFI_ADJUST_CFA_OFFSET 8
+ .endm
+
+ .macro UNFAKE_STACK_FRAME
+ addq $8*6, %rsp
+ CFI_ADJUST_CFA_OFFSET -(6*8)
+ .endm
+
+ .macro CFI_DEFAULT_STACK
+ CFI_ADJUST_CFA_OFFSET (SS)
+ CFI_OFFSET r15,R15-SS
+ CFI_OFFSET r14,R14-SS
+ CFI_OFFSET r13,R13-SS
+ CFI_OFFSET r12,R12-SS
+ CFI_OFFSET rbp,RBP-SS
+ CFI_OFFSET rbx,RBX-SS
+ CFI_OFFSET r11,R11-SS
+ CFI_OFFSET r10,R10-SS
+ CFI_OFFSET r9,R9-SS
+ CFI_OFFSET r8,R8-SS
+ CFI_OFFSET rax,RAX-SS
+ CFI_OFFSET rcx,RCX-SS
+ CFI_OFFSET rdx,RDX-SS
+ CFI_OFFSET rsi,RSI-SS
+ CFI_OFFSET rdi,RDI-SS
+ CFI_OFFSET rsp,RSP-SS
+ CFI_OFFSET rip,RIP-SS
+ .endm
+
+ /*
+ * Must be consistent with the definition in arch-x86_64.h:
+ * struct switch_to_user {
+ * u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
+ * } PACKED;
+ * #define VGCF_IN_SYSCALL (1<<8)
+ */
+ .macro SWITCH_TO_USER flag
+ movl $0,%gs:pda_kernel_mode # change to user mode
+ subq $8*4,%rsp # reuse rip, cs, rflags, rsp, ss in the stack
+ movq %rax,(%rsp)
+ movq %r11,1*8(%rsp)
+ movq %rcx,2*8(%rsp) # we saved %rcx upon exceptions
+ movq $\flag,3*8(%rsp)
+ movq $__USER_CS,5*8(%rsp)
+ movq $__USER_DS,8*8(%rsp)
+ movq $__HYPERVISOR_switch_to_user,%rax
+ syscall
+ .endm
+
+ .macro SWITCH_TO_KERNEL ssoff,adjust=0
+ btsq $0,%gs:pda_kernel_mode
+ jc 1f
+ orb $1,\ssoff-\adjust+4(%rsp)
+1:
+ .endm
+
+/*
+ * A newly forked process directly context switches into this.
+ */
+/* rdi: prev */
+ENTRY(ret_from_fork)
+ CFI_STARTPROC
+ CFI_DEFAULT_STACK
+ call schedule_tail
+ GET_THREAD_INFO(%rcx)
+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
+ jnz rff_trace
+rff_action:
+ RESTORE_REST
+ testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
+ je int_ret_from_sys_call
+ testl $_TIF_IA32,threadinfo_flags(%rcx)
+ jnz int_ret_from_sys_call
+ jmp ret_from_sys_call
+rff_trace:
+ movq %rsp,%rdi
+ call syscall_trace_leave
+ GET_THREAD_INFO(%rcx)
+ jmp rff_action
+ CFI_ENDPROC
+
+/*
+ * System call entry. Up to 6 arguments in registers are supported.
+ *
+ * SYSCALL does not save anything on the stack and does not change the
+ * stack pointer.
+ */
+
+/*
+ * Register setup:
+ * rax system call number
+ * rdi arg0
+ * rcx return address for syscall/sysret, C arg3
+ * rsi arg1
+ * rdx arg2
+ * r10 arg3 (--> moved to rcx for C)
+ * r8 arg4
+ * r9 arg5
+ * r11 eflags for syscall/sysret, temporary for C
+ * r12-r15,rbp,rbx saved by C code, not touched.
+ *
+ * Interrupts are off on entry.
+ * Only called from user space.
+ *
+ * XXX if we had a free scratch register we could save the RSP into the stack frame
+ * and report it properly in ps. Unfortunately we don't have one. (A userspace
+ * sketch of this register convention follows this file's diff.)
+ */
+
+ENTRY(system_call)
+ CFI_STARTPROC
+ SAVE_ARGS -8,0
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_SAVE_UPCALL_MASK(%r11,%cl,EVENT_MASK-ARGOFFSET) # saved %rcx
+ XEN_UNBLOCK_EVENTS(%r11)
+ GET_THREAD_INFO(%rcx)
+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
+ jnz tracesys
+ cmpq $__NR_syscall_max,%rax
+ ja badsys
+ movq %r10,%rcx
+ call *sys_call_table(,%rax,8) # XXX: rip relative
+ movq %rax,RAX-ARGOFFSET(%rsp)
+/*
+ * Syscall return path ending with SYSRET (fast path)
+ * Has incomplete stack frame and undefined top of stack.
+ */
+ .globl ret_from_sys_call
+ret_from_sys_call:
+ movl $_TIF_WORK_MASK,%edi
+ /* edi: flagmask */
+sysret_check:
+ GET_THREAD_INFO(%rcx)
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_BLOCK_EVENTS(%rsi)
+ movl threadinfo_flags(%rcx),%edx
+ andl %edi,%edx
+ jnz sysret_careful
+ XEN_UNBLOCK_EVENTS(%rsi)
+ RESTORE_ARGS 0,8,0
+ SWITCH_TO_USER VGCF_IN_SYSCALL
+
+ /* Handle reschedules */
+ /* edx: work, edi: workmask */
+sysret_careful:
+ bt $TIF_NEED_RESCHED,%edx
+ jnc sysret_signal
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_BLOCK_EVENTS(%rsi)
+ pushq %rdi
+ call schedule
+ popq %rdi
+ jmp sysret_check
+
+ /* Handle a signal */
+sysret_signal:
+/* sti */
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_UNBLOCK_EVENTS(%rsi)
+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
+ jz 1f
+
+ /* Really a signal */
+ /* edx: work flags (arg3) */
+ leaq do_notify_resume(%rip),%rax
+ leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
+ xorl %esi,%esi # oldset -> arg2
+ call ptregscall_common
+1: movl $_TIF_NEED_RESCHED,%edi
+ jmp sysret_check
+
+ /* Do syscall tracing */
+tracesys:
+ SAVE_REST
+ movq $-ENOSYS,RAX(%rsp)
+ movq %rsp,%rdi
+ call syscall_trace_enter
+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ cmpq $__NR_syscall_max,%rax
+ ja 1f
+ movq %r10,%rcx /* fixup for C */
+ call *sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+1: SAVE_REST
+ movq %rsp,%rdi
+ call syscall_trace_leave
+ RESTORE_REST
+ jmp ret_from_sys_call
+
+badsys:
+ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
+ jmp ret_from_sys_call
+
+/*
+ * Syscall return path ending with IRET.
+ * Has correct top of stack, but partial stack frame.
+ */
+ENTRY(int_ret_from_sys_call)
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_BLOCK_EVENTS(%rsi)
+ testb $3,CS-ARGOFFSET(%rsp)
+ jnz 1f
+ /* Need to set the proper %ss (not NULL) for ring 3 iretq */
+ movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
+ jmp retint_restore_args # return from ring3 kernel
+1:
+ movl $_TIF_ALLWORK_MASK,%edi
+ /* edi: mask to check */
+int_with_check:
+ GET_THREAD_INFO(%rcx)
+ movl threadinfo_flags(%rcx),%edx
+ andl %edi,%edx
+ jnz int_careful
+ jmp retint_restore_args
+
+ /* Either reschedule or signal or syscall exit tracking needed. */
+ /* First do a reschedule test. */
+ /* edx: work, edi: workmask */
+int_careful:
+ bt $TIF_NEED_RESCHED,%edx
+ jnc int_very_careful
+/* sti */
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_UNBLOCK_EVENTS(%rsi)
+ pushq %rdi
+ call schedule
+ popq %rdi
+ jmp int_with_check
+
+ /* handle signals and tracing -- both require a full stack frame */
+int_very_careful:
+/* sti */
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_UNBLOCK_EVENTS(%rsi)
+ SAVE_REST
+ /* Check for syscall exit trace */
+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
+ jz int_signal
+ pushq %rdi
+ leaq 8(%rsp),%rdi # &ptregs -> arg1
+ call syscall_trace_leave
+ popq %rdi
+ btr $TIF_SYSCALL_TRACE,%edi
+ btr $TIF_SYSCALL_AUDIT,%edi
+ btr $TIF_SINGLESTEP,%edi
+ jmp int_restore_rest
+
+int_signal:
+ testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
+ jz 1f
+ movq %rsp,%rdi # &ptregs -> arg1
+ xorl %esi,%esi # oldset -> arg2
+ call do_notify_resume
+1: movl $_TIF_NEED_RESCHED,%edi
+int_restore_rest:
+ RESTORE_REST
+ jmp int_with_check
+ CFI_ENDPROC
+
+/*
+ * Certain special system calls need to save a full stack frame.
+ */
+
+ .macro PTREGSCALL label,func,arg
+ .globl \label
+\label:
+ leaq \func(%rip),%rax
+ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
+ jmp ptregscall_common
+ .endm
+
+ PTREGSCALL stub_clone, sys_clone, %r8
+ PTREGSCALL stub_fork, sys_fork, %rdi
+ PTREGSCALL stub_vfork, sys_vfork, %rdi
+ PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
+ PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
+ PTREGSCALL stub_iopl, sys_iopl, %rsi
+
+ENTRY(ptregscall_common)
+ CFI_STARTPROC
+ popq %r11
+ CFI_ADJUST_CFA_OFFSET -8
+ SAVE_REST
+ movq %r11, %r15
+ call *%rax
+ movq %r15, %r11
+ RESTORE_REST
+ pushq %r11
+ CFI_ADJUST_CFA_OFFSET 8
+ ret
+ CFI_ENDPROC
+
+ENTRY(stub_execve)
+ CFI_STARTPROC
+ popq %r11
+ CFI_ADJUST_CFA_OFFSET -8
+ SAVE_REST
+ movq %r11, %r15
+ call sys_execve
+ GET_THREAD_INFO(%rcx)
+ bt $TIF_IA32,threadinfo_flags(%rcx)
+ jc exec_32bit
+ movq %r15, %r11
+ RESTORE_REST
+ push %r11
+ ret
+
+exec_32bit:
+ CFI_ADJUST_CFA_OFFSET REST_SKIP
+ movq %rax,RAX(%rsp)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+
+/*
+ * sigreturn is special because it needs to restore all registers on return.
+ * This cannot be done with SYSRET, so use the IRET return path instead.
+ */
+ENTRY(stub_rt_sigreturn)
+ CFI_STARTPROC
+ addq $8, %rsp
+ SAVE_REST
+ movq %rsp,%rdi
+ call sys_rt_sigreturn
+ movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+
+
+/*
+ * Interrupt entry/exit.
+ *
+ * Interrupt entry points save only callee clobbered registers in fast path.
+ *
+ * Entry runs with interrupts off.
+ */
+
+/* 0(%rsp): interrupt number */
+ .macro interrupt func
+ CFI_STARTPROC simple
+ CFI_DEF_CFA rsp,(SS-RDI)
+ CFI_REL_OFFSET rsp,(RSP-ORIG_RAX)
+ CFI_REL_OFFSET rip,(RIP-ORIG_RAX)
+ cld
+#ifdef CONFIG_DEBUG_INFO
+ SAVE_ALL
+ movq %rsp,%rdi
+ /*
+ * Setup a stack frame pointer. This allows gdb to trace
+ * back to the original stack.
+ */
+ movq %rsp,%rbp
+ CFI_DEF_CFA_REGISTER rbp
+#else
+ SAVE_ARGS
+ leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
+#endif
+#if 0 /* For Xen we don't need to do this */
+ testl $3,CS(%rdi)
+ je 1f
+ swapgs
+#endif
+1: addl $1,%gs:pda_irqcount # RED-PEN should check preempt count
+ movq %gs:pda_irqstackptr,%rax
+ cmoveq %rax,%rsp
+ pushq %rdi # save old stack
+ call \func
+ .endm
+
+retint_check:
+ movl threadinfo_flags(%rcx),%edx
+ andl %edi,%edx
+ jnz retint_careful
+retint_restore_args:
+ RESTORE_ARGS 0,8,0
+ testb $3,8(%rsp) # check CS
+ jnz user_mode
+kernel_mode:
+ orb $3,1*8(%rsp)
+ iretq
+user_mode:
+ SWITCH_TO_USER 0
+
+ /* edi: workmask, edx: work */
+retint_careful:
+ bt $TIF_NEED_RESCHED,%edx
+ jnc retint_signal
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_UNBLOCK_EVENTS(%rsi)
+/* sti */
+ pushq %rdi
+ call schedule
+ popq %rdi
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_BLOCK_EVENTS(%rsi)
+ GET_THREAD_INFO(%rcx)
+/* cli */
+ jmp retint_check
+
+retint_signal:
+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
+ jz retint_restore_args
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_UNBLOCK_EVENTS(%rsi)
+ SAVE_REST
+ movq $-1,ORIG_RAX(%rsp)
+ xorq %rsi,%rsi # oldset
+ movq %rsp,%rdi # &pt_regs
+ call do_notify_resume
+ RESTORE_REST
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_BLOCK_EVENTS(%rsi)
+ movl $_TIF_NEED_RESCHED,%edi
+ GET_THREAD_INFO(%rcx)
+ jmp retint_check
+
+#ifdef CONFIG_PREEMPT
+ /* Returning to kernel space. Check if we need preemption */
+ /* rcx: threadinfo. interrupts off. */
+ .p2align
+retint_kernel:
+ cmpl $0,threadinfo_preempt_count(%rcx)
+ jnz retint_restore_args
+ bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
+ jnc retint_restore_args
+ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
+ jc retint_restore_args
+ movl $PREEMPT_ACTIVE,threadinfo_preempt_count(%rcx)
+/* sti */
+ XEN_GET_VCPU_INFO(%rsi)
+ XEN_UNBLOCK_EVENTS(%rsi)
+ call schedule
+ XEN_GET_VCPU_INFO(%rsi) /* %esi can be different */
+ XEN_BLOCK_EVENTS(%rsi)
+/* cli */
+ GET_THREAD_INFO(%rcx)
+ movl $0,threadinfo_preempt_count(%rcx)
+ jmp retint_kernel /* check again */
+#endif
+ CFI_ENDPROC
+
+/*
+ * APIC interrupts.
+ */
+ .macro apicinterrupt num,func
+ pushq $\num-256
+ interrupt \func
+ jmp ret_from_intr
+ CFI_ENDPROC
+ .endm
+
+#ifdef CONFIG_SMP
+ENTRY(reschedule_interrupt)
+ apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
+
+ENTRY(invalidate_interrupt)
+ apicinterrupt INVALIDATE_TLB_VECTOR,smp_invalidate_interrupt
+
+ENTRY(call_function_interrupt)
+ apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ENTRY(apic_timer_interrupt)
+ apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
+
+ENTRY(error_interrupt)
+ apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
+
+ENTRY(spurious_interrupt)
+ apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
+#endif
+
+/*
+ * Exception entry points.
+ */
+ .macro zeroentry sym
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* skip rcx and r11 */
+ pushq $0 /* push error code/oldrax */
+ pushq %rax /* push real oldrax to the rdi slot */
+ leaq \sym(%rip),%rax
+ jmp error_entry
+ .endm
+
+ .macro errorentry sym
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x18,%rsp /* rsp points to the error code */
+ pushq %rax
+ leaq \sym(%rip),%rax
+ jmp error_entry
+ .endm
+
+ /* error code is on the stack already */
+ /* handle NMI-like exceptions that can happen anywhere */
+ .macro paranoidentry sym
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* skip rcx and r11 */
+ SAVE_ALL
+ cld
+ movl $1,%ebx
+ movl $MSR_GS_BASE,%ecx
+ rdmsr
+ testl %edx,%edx
+ js 1f
+/* swapgs */
+ xorl %ebx,%ebx
+1: movq %rsp,%rdi
+ movq ORIG_RAX(%rsp),%rsi
+ movq $-1,ORIG_RAX(%rsp)
+ call \sym
+ .endm
+
+/*
+ * Exception entry point. This expects an error code/orig_rax on the stack
+ * and the exception handler in %rax.
+ */
+ENTRY(error_entry)
+ CFI_STARTPROC simple
+ CFI_DEF_CFA rsp,(SS-RDI)
+ CFI_REL_OFFSET rsp,(RSP-RDI)
+ CFI_REL_OFFSET rip,(RIP-RDI)
+ /* rdi slot contains rax, oldrax contains error code */
+ cld
+ subq $14*8,%rsp
+ CFI_ADJUST_CFA_OFFSET (14*8)
+ movq %rsi,13*8(%rsp)
+ CFI_REL_OFFSET rsi,RSI
+ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
+ movq %rdx,12*8(%rsp)
+ CFI_REL_OFFSET rdx,RDX
+ movq %rcx,11*8(%rsp)
+ CFI_REL_OFFSET rcx,RCX
+ movq %rsi,10*8(%rsp) /* store rax */
+ CFI_REL_OFFSET rax,RAX
+ movq %r8, 9*8(%rsp)
+ CFI_REL_OFFSET r8,R8
+ movq %r9, 8*8(%rsp)
+ CFI_REL_OFFSET r9,R9
+ movq %r10,7*8(%rsp)
+ CFI_REL_OFFSET r10,R10
+ movq %r11,6*8(%rsp)
+ CFI_REL_OFFSET r11,R11
+ movq %rbx,5*8(%rsp)
+ CFI_REL_OFFSET rbx,RBX
+ movq %rbp,4*8(%rsp)
+ CFI_REL_OFFSET rbp,RBP
+ movq %r12,3*8(%rsp)
+ CFI_REL_OFFSET r12,R12
+ movq %r13,2*8(%rsp)
+ CFI_REL_OFFSET r13,R13
+ movq %r14,1*8(%rsp)
+ CFI_REL_OFFSET r14,R14
+ movq %r15,(%rsp)
+ CFI_REL_OFFSET r15,R15
+#if 0
+ cmpl $__KERNEL_CS,CS(%rsp)
+ je error_kernelspace
+#endif
+error_call_handler:
+ movq %rdi, RDI(%rsp)
+ movq %rsp,%rdi
+ movq ORIG_RAX(%rsp),%rsi # get error code
+ movq $-1,ORIG_RAX(%rsp)
+ leaq do_hypervisor_callback,%rcx
+ cmpq %rax,%rcx
+ je 0f # don't save event mask for callbacks
+ XEN_GET_VCPU_INFO(%r11)
+ XEN_SAVE_UPCALL_MASK(%r11,%cl,EVENT_MASK)
+0:
+ call *%rax
+error_check_event:
+ movb EVENT_MASK(%rsp), %al
+ notb %al # %al == ~saved_mask
+ XEN_LOCK_VCPU_INFO_SMP(%rsi)
+ andb evtchn_upcall_mask(%rsi),%al
+ andb $1,%al # %al == mask & ~saved_mask
+ jnz restore_all_enable_events # != 0 => reenable event delivery
+ XEN_UNLOCK_VCPU_INFO_SMP(%rsi)
+error_exit:
+ RESTORE_REST
+/* cli */
+ GET_THREAD_INFO(%rcx)
+ testb $3,CS-REST_SKIP(%rsp)
+ jz retint_kernel
+ movl threadinfo_flags(%rcx),%edx
+ movl $_TIF_WORK_MASK,%edi
+ andl %edi,%edx
+ jnz retint_careful
+ RESTORE_ARGS 0,8,0
+ SWITCH_TO_USER 0
+ CFI_ENDPROC
+
+error_kernelspace:
+ /*
+ * We need to rewrite the logic here because we don't do iretq to
+ * return to user mode. It's still possible that we get a trap/fault
+ * in the kernel (when accessing buffers pointed to by system calls,
+ * for example).
+ *
+ */
+#if 0
+ incl %ebx
+ /* There are two places in the kernel that can potentially fault with
+ usergs. Handle them here. The exception handlers after
+ iret run with kernel gs again, so don't set the user space flag.
+ B stepping K8s sometimes report a truncated RIP for IRET
+ exceptions returning to compat mode. Check for these here too. */
+ leaq iret_label(%rip),%rbp
+ cmpq %rbp,RIP(%rsp)
+ je error_swapgs
+ movl %ebp,%ebp /* zero extend */
+ cmpq %rbp,RIP(%rsp)
+ je error_swapgs
+ cmpq $gs_change,RIP(%rsp)
+ je error_swapgs
+ jmp error_sti
+#endif
+
+ENTRY(hypervisor_callback)
+ zeroentry do_hypervisor_callback
+
+/*
+ * Copied from arch/xen/i386/kernel/entry.S
+ */
+# A note on the "critical region" in our callback handler.
+# We want to avoid stacking callback handlers due to events occurring
+# during handling of the last event. To do this, we keep events disabled
+# until we've done all processing. HOWEVER, we must enable events before
+# popping the stack frame (can't be done atomically) and so it would still
+# be possible to get enough handler activations to overflow the stack.
+# Although unlikely, bugs of that kind are hard to track down, so we'd
+# like to avoid the possibility.
+# So, on entry to the handler we detect whether we interrupted an
+# existing activation in its critical region -- if so, we pop the current
+# activation and restart the handler using the previous one.
+
+ENTRY(do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
+# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
+# see the correct pointer to the pt_regs
+ addq $8, %rsp # we don't return, adjust the stack frame
+ movq RIP(%rsp),%rax
+ cmpq $scrit,%rax
+ jb 11f
+ cmpq $ecrit,%rax
+ jb critical_region_fixup
+11: movb $0, EVENT_MASK(%rsp)
+ call evtchn_do_upcall
+ jmp error_check_event
+
+ ALIGN
+restore_all_enable_events:
+ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
+scrit: /**** START OF CRITICAL REGION ****/
+ XEN_TEST_PENDING(%rsi)
+ jnz 14f # process more events if necessary...
+ XEN_UNLOCK_VCPU_INFO_SMP(%rsi)
+ RESTORE_REST
+ RESTORE_ARGS 0,8,0
+ testb $3,8(%rsp) # check CS
+ jnz crit_user_mode
+ orb $3,1*8(%rsp)
+ iretq
+crit_user_mode:
+ SWITCH_TO_USER 0
+
+14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
+ XEN_UNLOCK_VCPU_INFO_SMP(%rsi)
+ movq %rsp,%rdi # set the argument again
+ jmp 11b
+ecrit: /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame.
+critical_region_fixup:
+ subq $scrit,%rax
+ shlq $1,%rax
+ addq $critical_fixup_table,%rax
+ movzwq (%rax),%rcx
+ xorq %rax,%rax
+ movb %ch,%al
+ movb $0,%ch
+#ifdef CONFIG_SMP
+ cmpb $0xff,%al
+ jne 15f
+ add $1,%al
+ GET_THREAD_INFO(%rbp)
+ XEN_UNLOCK_VCPU_INFO_SMP(%r11)
+15:
+#endif
+ movq %rsp,%rsi
+ movq %rsi,%rdi
+ addq $0xa8,%rax
+ addq %rax,%rdi
+ addq %rcx,%rsi
+ shrq $3,%rcx # convert bytes to (8-byte) words
+ je 17f # skip loop if nothing to copy
+16: subq $8,%rsi # pre-decrementing copy loop
+ subq $8,%rdi
+ movq (%rsi),%rax
+ movq %rax,(%rdi)
+ loop 16b
+17: movq %rdi,%rsp # final %rdi is top of merged stack
+ jmp 11b
+
+critical_fixup_table:
+ .byte 0x00,0x00,0x00,0x00 # testb $0xff,0x0(%rsi)
+ .byte 0x00,0x00,0x00,0x00,0x00,0x00 # jne <crit_user_mode+0x42>
+ .byte 0x00,0x00,0x00,0x00 # mov (%rsp),%r15
+ .byte 0x00,0x00,0x00,0x00,0x00 # mov 0x8(%rsp),%r14
+ .byte 0x00,0x00,0x00,0x00,0x00 # mov 0x10(%rsp),%r13
+ .byte 0x00,0x00,0x00,0x00,0x00 # mov 0x18(%rsp),%r12
+ .byte 0x00,0x00,0x00,0x00,0x00 # mov 0x20(%rsp),%rbp
+ .byte 0x00,0x00,0x00,0x00,0x00 # mov 0x28(%rsp),%rbx
+ .byte 0x00,0x00,0x00,0x00 # add $0x30,%rsp
+ .byte 0x30,0x30,0x30,0x30 # mov (%rsp),%r11
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x8(%rsp),%r10
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x10(%rsp),%r9
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x18(%rsp),%r8
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x20(%rsp),%rax
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x28(%rsp),%rcx
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x30(%rsp),%rdx
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x38(%rsp),%rsi
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x40(%rsp),%rdi
+ .byte 0x30,0x30,0x30,0x30 # add $0x50,%rsp
+ .byte 0x80,0x80,0x80,0x80,0x80 # testb $0x3,0x8(%rsp)
+ .byte 0x80,0x80 # jne ffffffff8010dc25 <crit_user_mode>
+ .byte 0x80,0x80,0x80,0x80 # orb $0x3,0x8(%rsp)
+ .byte 0x80,0x80 # iretq
+ # <crit_user_mode>:
+ .byte 0x80,0x80,0x80,0x80,0x80,0x80,0x80 # movq $0x0,%gs:0x60
+ .byte 0x80,0x80,0x80,0x80,0x80
+ .byte 0x80,0x80,0x80,0x80 # sub $0x20,%rsp
+ .byte 0x60,0x60,0x60,0x60 # mov %rax,(%rsp)
+ .byte 0x60,0x60,0x60,0x60,0x60 # mov %r11,0x8(%rsp)
+ .byte 0x60,0x60,0x60,0x60,0x60 # mov %rcx,0x10(%rsp)
+ .byte 0x60,0x60,0x60,0x60,0x60,0x60,0x60 # movq $0x0,0x18(%rsp)
+ .byte 0x60,0x60
+ .byte 0x60,0x60,0x60,0x60,0x60,0x60,0x60 # movq $0x33,0x28(%rsp)
+ .byte 0x60,0x60
+ .byte 0x60,0x60,0x60,0x60,0x60,0x60,0x60 # movq $0x2b,0x40(%rsp)
+ .byte 0x60,0x60
+ .byte 0x60,0x60,0x60,0x60,0x60,0x60,0x60 # mov $0x17,%rax
+ .byte 0x60,0x60 # syscall
+ .byte 0x60,0x60,0x60,0x60,0x60 # movb $0x1,0x1(%rsi)
+ .byte 0x60,0x60,0x60 # mov %rsp,%rdi
+ .byte 0x60,0x60,0x60,0x60,0x60 # jmpq <do_hypervisor_callback+0x20>
+# Hypervisor uses this for application faults while it executes.
+ENTRY(failsafe_callback)
+ hlt
+#if 0
+1: movl (%rsp),%ds
+2: movl 8(%rsp),%es
+3: movl 16(%rsp),%fs
+4: movl 24(%rsp),%gs
+ subq $14,%rsp
+ SAVE_ALL
+ jmp ret_from_exception
+.section .fixup,"ax"; \
+6: movq $0,(%rsp); \
+ jmp 1b; \
+7: movq $0,(%rsp); \
+ jmp 2b; \
+8: movq $0,(%rsp); \
+ jmp 3b; \
+9: movq $0,(%rsp); \
+ jmp 4b; \
+.previous; \
+.section __ex_table,"a";\
+ .align 8; \
+ .long 1b,6b; \
+ .long 2b,7b; \
+ .long 3b,8b; \
+ .long 4b,9b; \
+.previous
+
+ .section __ex_table,"a"
+ .align 8
+ .quad gs_change,bad_gs
+ .previous
+ .section .fixup,"ax"
+ /* running with kernelgs */
+bad_gs:
+/* swapgs */ /* switch back to user gs */
+ xorl %eax,%eax
+ movl %eax,%gs
+ jmp 2b
+ .previous
+#endif
+/*
+ * Create a kernel thread.
+ *
+ * C extern interface:
+ * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ *
+ * asm input arguments:
+ * rdi: fn, rsi: arg, rdx: flags
+ */
+ENTRY(kernel_thread)
+ CFI_STARTPROC
+ FAKE_STACK_FRAME $child_rip
+ SAVE_ALL
+
+ # rdi: flags, rsi: usp, rdx: will be &pt_regs
+ movq %rdx,%rdi
+ orq kernel_thread_flags(%rip),%rdi
+ movq $-1, %rsi
+ movq %rsp, %rdx
+
+ xorl %r8d,%r8d
+ xorl %r9d,%r9d
+
+ # clone now
+ call do_fork
+ movq %rax,RAX(%rsp)
+ xorl %edi,%edi
+
+ /*
+ * It isn't worth checking for a reschedule here, so within the
+ * x86_64 port you can rely on kernel_thread() not rescheduling the
+ * child before returning; this avoids the need for hacks, for
+ * example to fork off the per-CPU idle tasks.
+ * [Hopefully no generic code relies on the reschedule -AK]
+ */
+ RESTORE_ALL
+ UNFAKE_STACK_FRAME
+ ret
+ CFI_ENDPROC
+
+
+child_rip:
+ /*
+ * Here we are in the child and the registers are set as they were
+ * at kernel_thread() invocation in the parent.
+ */
+ movq %rdi, %rax
+ movq %rsi, %rdi
+ call *%rax
+ # exit
+ xorq %rdi, %rdi
+ call do_exit
+
+/*
+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
+ *
+ * C extern interface:
+ * extern long execve(char *name, char **argv, char **envp)
+ *
+ * asm input arguments:
+ * rdi: name, rsi: argv, rdx: envp
+ *
+ * We want to fall back into:
+ * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
+ *
+ * do_sys_execve asm fallback arguments:
+ * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
+ */
+ENTRY(execve)
+ CFI_STARTPROC
+ FAKE_STACK_FRAME $0
+ SAVE_ALL
+ call sys_execve
+ movq %rax, RAX(%rsp)
+ RESTORE_REST
+ testq %rax,%rax
+ jne 1f
+ jmp int_ret_from_sys_call
+1: RESTORE_ARGS
+ UNFAKE_STACK_FRAME
+ ret
+ CFI_ENDPROC
+
+
+ /*
+ * Copy error_entry because of the different stack frame
+ */
+ENTRY(page_fault)
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp # now %rsp points to %cr2
+ pushq %rax
+ leaq do_page_fault(%rip),%rax
+ cld
+ subq $13*8,%rsp
+ movq %rdx,12*8(%rsp) # save %rdx
+ movq 13*8(%rsp),%rdx # load rax
+ movq %rcx,11*8(%rsp)
+ movq %rdx,10*8(%rsp) # store rax
+ movq %rsi,13*8(%rsp) # now save %rsi
+ movq 14*8(%rsp),%rdx # load %cr2, 3rd argument
+ movq %r8, 9*8(%rsp)
+ movq %r9, 8*8(%rsp)
+ movq %r10,7*8(%rsp)
+ movq %r11,6*8(%rsp)
+ movq %rbx,5*8(%rsp)
+ movq %rbp,4*8(%rsp)
+ movq %r12,3*8(%rsp)
+ movq %r13,2*8(%rsp)
+ movq %r14,1*8(%rsp)
+ movq %r15,(%rsp)
+#if 0
+ cmpl $__KERNEL_CS,CS(%rsp)
+ je error_kernelspace
+#endif
+ /*
+ * 1st and 2nd arguments are set by error_call_handler
+ */
+ jmp error_call_handler
+
+ENTRY(coprocessor_error)
+ zeroentry do_coprocessor_error
+
+ENTRY(simd_coprocessor_error)
+ zeroentry do_simd_coprocessor_error
+
+ENTRY(device_not_available)
+ zeroentry math_state_restore
+
+ /* runs on exception stack */
+ENTRY(debug)
+ CFI_STARTPROC
+ pushq $0
+ CFI_ADJUST_CFA_OFFSET 8
+ paranoidentry do_debug
+ /* switch back to process stack to restore the state ptrace touched */
+ movq %rax,%rsp
+ jmp paranoid_exit
+ CFI_ENDPROC
+
+#if 0
+ /* runs on exception stack */
+ENTRY(nmi)
+ CFI_STARTPROC
+ pushq $-1
+ CFI_ADJUST_CFA_OFFSET 8
+ paranoidentry do_nmi
+ /* ebx: no swapgs flag */
+#endif
+paranoid_exit:
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz paranoid_restore
+paranoid_swapgs:
+/* cli
+ swapgs */
+paranoid_restore:
+ RESTORE_ALL 8
+/* iretq */
+paranoid_userspace:
+/* cli */
+ GET_THREAD_INFO(%rcx)
+ movl threadinfo_flags(%rcx),%edx
+ testl $_TIF_NEED_RESCHED,%edx
+ jnz paranoid_resched
+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
+ jnz paranoid_signal
+ jmp paranoid_swapgs
+paranoid_resched:
+/* sti */
+ call schedule
+ jmp paranoid_exit
+paranoid_signal:
+/* sti */
+ xorl %esi,%esi /* oldset */
+ movq %rsp,%rdi /* &pt_regs */
+ call do_notify_resume
+ jmp paranoid_exit
+ CFI_ENDPROC
+
+ENTRY(int3)
+ zeroentry do_int3
+
+ENTRY(overflow)
+ zeroentry do_overflow
+
+ENTRY(bounds)
+ zeroentry do_bounds
+
+ENTRY(invalid_op)
+ zeroentry do_invalid_op
+
+ENTRY(coprocessor_segment_overrun)
+ zeroentry do_coprocessor_segment_overrun
+
+ENTRY(reserved)
+ zeroentry do_reserved
+
+ /* runs on exception stack */
+ENTRY(double_fault)
+ CFI_STARTPROC
+ paranoidentry do_double_fault
+ movq %rax,%rsp
+ jmp paranoid_exit
+ CFI_ENDPROC
+
+ENTRY(invalid_TSS)
+ errorentry do_invalid_TSS
+
+ENTRY(segment_not_present)
+ errorentry do_segment_not_present
+
+ /* runs on exception stack */
+ENTRY(stack_segment)
+ CFI_STARTPROC
+ paranoidentry do_stack_segment
+ movq %rax,%rsp
+ jmp paranoid_exit
+ CFI_ENDPROC
+
+ENTRY(general_protection)
+ errorentry do_general_protection
+
+ENTRY(alignment_check)
+ errorentry do_alignment_check
+
+ENTRY(divide_error)
+ zeroentry do_divide_error
+
+ENTRY(spurious_interrupt_bug)
+ zeroentry do_spurious_interrupt_bug
+
+#ifdef CONFIG_X86_MCE
+ /* runs on exception stack */
+ENTRY(machine_check)
+ CFI_STARTPROC
+ pushq $0
+ CFI_ADJUST_CFA_OFFSET 8
+ paranoidentry do_machine_check
+ jmp paranoid_exit
+ CFI_ENDPROC
+#endif
+
+ENTRY(call_debug)
+ zeroentry do_call_debug
+
+
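The register table above system_call documents the x86-64 syscall ABI as user space sees it. A minimal userspace sketch that issues a raw syscall by that convention (syscall number 1 is __NR_write on x86-64; %rcx and %r11 go in the clobber list because SYSCALL/SYSRET destroy them, and a fourth argument would travel in %r10, not %rcx):

#include <unistd.h>

static long raw_syscall3(long nr, long a0, long a1, long a2)
{
	long ret;

	asm volatile("syscall"
		     : "=a" (ret)			/* rax: number in, return value out */
		     : "a" (nr), "D" (a0), "S" (a1), "d" (a2)
		     : "rcx", "r11", "memory");		/* destroyed by SYSCALL/SYSRET */
	return ret;
}

int main(void)
{
	static const char msg[] = "hello via raw syscall\n";

	raw_syscall3(1, STDOUT_FILENO, (long)msg, sizeof(msg) - 1);
	return 0;
}

Under Xen, as in this patch, the kernel-side return path uses the __HYPERVISOR_switch_to_user hypercall (see SWITCH_TO_USER above) instead of a bare SYSRET, but the user-visible register contract is unchanged.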
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head.S b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head.S
new file mode 100644
index 0000000000..8a9caf951a
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head.S
@@ -0,0 +1,207 @@
+/*
+ * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
+ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
+ *
+ * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen
+ */
+
+
+#include <linux/linkage.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=3.0,VIRT_BASE=0xffffffff80100000"
+ .ascii ",LOADER=generic"
+/* .ascii ",PT_MODE_WRITABLE" */
+ .byte 0
+
+
+#include <linux/threads.h>
+#include <asm/desc.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/msr.h>
+#include <asm/cache.h>
+/* #include <asm/thread_info.h> */
+
+
+/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
+ * because we need identity-mapped pages during setup, so define __START_KERNEL to
+ * 0x100000 for this stage
+ *
+ */
+
+ .text
+ .code64
+ENTRY(_start)
+ cld
+ movq init_rsp(%rip),%rsp
+ /* Copy the necessary stuff from xen_start_info structure. */
+ movq $xen_start_info_union,%rdi
+ movq $64,%rcx /* sizeof (union xen_start_info_union) / sizeof (long) */
+ rep movsq
+
+#ifdef CONFIG_SMP
+ ENTRY(startup_64_smp)
+ cld
+#endif /* CONFIG_SMP */
+
+ /* zero EFLAGS after setting rsp */
+ pushq $0
+ popfq
+ movq initial_code(%rip),%rax
+ jmp *%rax
+
+ /* SMP bootup changes these two */
+ .globl initial_code
+initial_code:
+ .quad x86_64_start_kernel
+ .globl init_rsp
+init_rsp:
+ .quad init_thread_union+THREAD_SIZE-8
+
+ENTRY(early_idt_handler)
+ xorl %eax,%eax
+ movq 8(%rsp),%rsi # get rip
+ movq (%rsp),%rdx
+ leaq early_idt_msg(%rip),%rdi
+1: hlt # generate #GP
+ jmp 1b
+
+early_idt_msg:
+ .asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
+
+#if 0
+ENTRY(lgdt_finish)
+ movl $(__USER_DS),%eax # DS/ES contains default USER segment
+ movw %ax,%ds
+ movw %ax,%es
+ movl $(__KERNEL_DS),%eax
+ movw %ax,%ss # after changing gdt.
+ popq %rax # get the return address
+ pushq $(__KERNEL_CS)
+ pushq %rax
+ lretq
+#endif
+
+ENTRY(stext)
+ENTRY(_stext)
+
+ /*
+ * This default setting generates an ident mapping at address 0x100000
+ * and a mapping for the kernel that precisely maps virtual address
+ * 0xffffffff80000000 to physical address 0x000000. (always using
+ * 2Mbyte large pages provided by PAE mode)
+ */
+.org 0x1000
+ENTRY(init_level4_pgt)
+ .fill 512,8,0
+
+ /*
+ * We update two pgd entries to make kernel and user pgd consistent
+ * at pgd_populate(). It can be used for kernel modules. So we place
+ * this page here for those cases to avoid memory corruption.
+ * We also use this page to establish the initial mapping for the
+ * vsyscall area.
+ */
+.org 0x2000
+ENTRY(init_level4_user_pgt)
+ .fill 512,8,0
+
+ /*
+ * This is used for vsyscall area mapping as we have a different
+ * level4 page table for user.
+ */
+.org 0x3000
+ENTRY(level3_user_pgt)
+ .fill 512,8,0
+
+.org 0x4000
+ENTRY(cpu_gdt_table)
+/* The TLS descriptors are currently at a different place compared to i386.
+ Hopefully nobody expects them at a fixed place (Wine?) */
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x008ffa000000ffff /* __KERNEL_COMPAT32_CS */
+ .quad 0x00affa000000ffff /* __KERNEL_CS */
+ .quad 0x00cff2000000ffff /* __KERNEL_DS */
+
+ .quad 0x00cffa000000ffff /* __USER32_CS */
+ .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
+ .quad 0x00affa000000ffff /* __USER_CS */
+ .quad 0x00cffa000000ffff /* __KERNEL32_CS */
+ .quad 0,0 /* TSS */
+ .quad 0 /* LDT */
+ .quad 0,0,0 /* three TLS descriptors */
+ .quad 0 /* unused now */
+
+gdt_end:
+ /* asm/segment.h:GDT_ENTRIES must match this */
+ /* This should be a multiple of the cache line size */
+ /* GDTs of other CPUs: */
+ .fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
+
+.org 0x5000
+ENTRY(empty_zero_page)
+
+.org 0x6000
+ENTRY(empty_bad_page)
+
+.org 0x7000
+ENTRY(empty_bad_pte_table)
+
+.org 0x8000
+ENTRY(empty_bad_pmd_table)
+
+ .org 0x9000
+#ifdef CONFIG_ACPI_SLEEP
+ENTRY(wakeup_level4_pgt)
+ .quad 0x0000000000102007 /* -> level3_ident_pgt */
+ .fill 255,8,0
+ .quad 0x000000000010a007
+ .fill 254,8,0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad 0x0000000000103007 /* -> level3_kernel_pgt */
+#endif
+
+ .data
+
+ .align 16
+ .globl cpu_gdt_descr
+cpu_gdt_descr:
+ .word gdt_end-cpu_gdt_table
+gdt:
+ .quad cpu_gdt_table
+#ifdef CONFIG_SMP
+ .rept NR_CPUS-1
+ .word 0
+ .quad 0
+ .endr
+#endif
+
+ENTRY(gdt_table32)
+ .quad 0x0000000000000000 /* This one is magic */
+ .quad 0x0000000000000000 /* unused */
+ .quad 0x00af9a000000ffff /* __KERNEL_CS */
+gdt32_end:
+
+/* We need valid kernel segments for data and code in long mode too;
+ * IRET will check the segment types. kkeil 2000/10/28
+ * Also, sysret mandates a special GDT layout. (A small descriptor-decode
+ * sketch follows this file's diff.)
+ */
+
+#if 0
+.align L1_CACHE_BYTES
+#endif
+ .align L1_CACHE_BYTES
+ENTRY(idt_table)
+ .rept 256
+ .quad 0
+ .quad 0
+ .endr
+
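The .quad values in cpu_gdt_table above are raw 8-byte segment descriptors. A small userspace decoder (illustrative only) applied to the __KERNEL_CS quad shows the interesting bits: L=1 marks a long-mode code segment, and the DPL decodes to 3 because a Xen/x86-64 guest kernel runs in ring 3 -- the same fact behind the "orb $3" applied to the saved %cs before iretq in entry.S:

#include <stdio.h>
#include <stdint.h>

static void decode_desc(uint64_t d)
{
	uint32_t limit = (d & 0xffff) | ((d >> 32) & 0xf0000);
	uint32_t base  = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
	unsigned type  = (d >> 40) & 0xf;	/* 0xa = code, execute/read */
	unsigned dpl   = (d >> 45) & 0x3;
	unsigned l     = (d >> 53) & 0x1;	/* 64-bit (long mode) code */
	unsigned g     = (d >> 55) & 0x1;	/* limit counted in 4K pages */

	printf("base=%#x limit=%#x type=%#x dpl=%u L=%u G=%u\n",
	       (unsigned)base, (unsigned)limit, type, dpl, l, g);
}

int main(void)
{
	decode_desc(0x00affa000000ffffULL);	/* __KERNEL_CS from head.S */
	return 0;
}

This prints base=0 limit=0xfffff type=0xa dpl=3 L=1 G=1, i.e. a flat, ring-3, 64-bit code segment.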
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head64.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head64.c
new file mode 100644
index 0000000000..a4304edf6d
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head64.c
@@ -0,0 +1,132 @@
+/*
+ * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *
+ * $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/percpu.h>
+
+#include <asm/processor.h>
+#include <asm/proto.h>
+#include <asm/smp.h>
+#include <asm/bootsetup.h>
+#include <asm/setup.h>
+#include <asm/desc.h>
+
+unsigned long start_pfn;
+
+/* Don't add a printk in here: printk relies on the PDA, which is not
+ initialized yet. */
+#if 0
+static void __init clear_bss(void)
+{
+ extern char __bss_start[], __bss_end[];
+ memset(__bss_start, 0,
+ (unsigned long) __bss_end - (unsigned long) __bss_start);
+}
+#endif
+
+extern char x86_boot_params[2048];
+
+#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
+#define OLD_CL_MAGIC_ADDR 0x90020
+#define OLD_CL_MAGIC 0xA33F
+#define OLD_CL_BASE_ADDR 0x90000
+#define OLD_CL_OFFSET 0x90022
+
+extern char saved_command_line[];
+
+#if 0
+static void __init copy_bootdata(char *real_mode_data)
+{
+ int new_data;
+ char * command_line;
+
+ memcpy(x86_boot_params, real_mode_data, 2048);
+ new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
+ if (!new_data) {
+ if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
+ printk("so old bootloader that it does not support commandline?!\n");
+ return;
+ }
+ new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
+ printk("old bootloader convention, maybe loadlin?\n");
+ }
+ command_line = (char *) ((u64)(new_data));
+ memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
+ printk("Bootdata ok (command line is %s)\n", saved_command_line);
+}
+#endif
+
+static void __init setup_boot_cpu_data(void)
+{
+ int dummy, eax;
+
+ /* get vendor info */
+ cpuid(0, &boot_cpu_data.cpuid_level,
+ (int *)&boot_cpu_data.x86_vendor_id[0],
+ (int *)&boot_cpu_data.x86_vendor_id[8],
+ (int *)&boot_cpu_data.x86_vendor_id[4]);
+
+ /* get cpu type */
+ cpuid(1, &eax, &dummy, &dummy,
+ (unsigned int *) &boot_cpu_data.x86_capability);
+ boot_cpu_data.x86 = (eax >> 8) & 0xf;
+ boot_cpu_data.x86_model = (eax >> 4) & 0xf;
+ boot_cpu_data.x86_mask = eax & 0xf;
+}
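+
+/*
+ * Added annotation (worked example, not part of the original patch):
+ * CPUID leaf 1 returns EAX = [...|family(11:8)|model(7:4)|stepping(3:0)],
+ * so e.g. eax = 0x0f61 yields x86 = 0xf, x86_model = 6, x86_mask = 1.
+ */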
+
+extern char _end[];
+
+void __init x86_64_start_kernel(char * real_mode_data)
+{
+ int i;
+
+ phys_to_machine_mapping = (u32 *)xen_start_info.mfn_list;
+ start_pfn = (__pa(xen_start_info.pt_base) >> PAGE_SHIFT) + xen_start_info.nr_pt_frames;
+
+ for (i = 0; i < 256; i++)
+ set_intr_gate(i, early_idt_handler);
+#if 0
+ asm volatile("lidt %0" :: "m" (idt_descr));
+#endif
+ pda_init(0);
+ /* copy_bootdata(real_mode_data); */
+#ifdef CONFIG_SMP
+ cpu_set(0, cpu_online_map);
+#endif
+ /* default console: */
+ if (!strstr(saved_command_line, "console="))
+ strcat(saved_command_line, " console=tty0");
+#if 0
+ s = strstr(saved_command_line, "earlyprintk=");
+ if (s != NULL)
+ setup_early_printk(s);
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+ s = strstr(saved_command_line, "numa=");
+ if (s != NULL)
+ numa_setup(s+5);
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ if (strstr(saved_command_line, "disableapic"))
+ disable_apic = 1;
+#endif
+ /* You need an early console to see this message. */
+ if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
+ panic("Kernel too big for kernel mapping\n");
+
+ setup_boot_cpu_data();
+ start_kernel();
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/init_task.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/init_task.c
new file mode 100644
index 0000000000..c4dc914911
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/init_task.c
@@ -0,0 +1,49 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
+/*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp;
+
+#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ioport.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ioport.c
new file mode 100644
index 0000000000..ff96e22341
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ioport.c
@@ -0,0 +1,63 @@
+/*
+ * linux/arch/x86_64/kernel/ioport.c
+ *
+ * This contains the io-permission bitmap code - written by obz, with changes
+ * by Linus.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/thread_info.h>
+#include <asm-xen/xen-public/physdev.h>
+
+/*
+ * sys_iopl has to be used when you want to access the IO ports
+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
+ */
+
+// asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
+asmlinkage long sys_iopl(unsigned int new_io_pl)
+{
+ unsigned int old_io_pl = current->thread.io_pl;
+ physdev_op_t op;
+
+ if (new_io_pl > 3)
+ return -EINVAL;
+
+ /* Need "raw I/O" privileges for direct port access. */
+ if ((new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ /* Maintain OS privileges even if user attempts to relinquish them. */
+ if (new_io_pl == 0)
+ new_io_pl = 1;
+
+ /* Change our version of the privilege levels. */
+ current->thread.io_pl = new_io_pl;
+
+ /* Force the change at ring 0. */
+ op.cmd = PHYSDEVOP_SET_IOPL;
+ op.u.set_iopl.iopl = new_io_pl;
+ HYPERVISOR_physdev_op(&op);
+
+ return 0;
+}
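+
+/*
+ * Hypothetical user-space sketch (an illustrative assumption, not part
+ * of this patch): raising the I/O privilege level with iopl(3), which
+ * lands in sys_iopl() above, before touching a port directly. Port
+ * 0x80 is the traditional POST diagnostic port.
+ */
+#if 0
+#include <stdio.h>
+#include <sys/io.h>
+
+int main(void)
+{
+ if (iopl(3) < 0) { /* requires CAP_SYS_RAWIO */
+ perror("iopl");
+ return 1;
+ }
+ outb(0x42, 0x80); /* direct port write, now permitted */
+ return 0;
+}
+#endif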
+
+/*
+ * this changes the io permissions bitmap in the current task.
+ */
+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+{
+ return turn_on ? sys_iopl(3) : 0;
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/irq.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/irq.c
new file mode 100644
index 0000000000..08effa3bc1
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/irq.c
@@ -0,0 +1,105 @@
+/*
+ * linux/arch/x86_64/kernel/irq.c
+ *
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains the lowest level x86_64-specific interrupt
+ * entry and irq statistics code. All the remaining irq logic is
+ * done by the generic kernel/irq/ code and in the
+ * x86_64-specific irq controller code. (e.g. i8259.c and
+ * io_apic.c.)
+ */
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+/*
+ * Interrupt statistics:
+ */
+
+atomic_t irq_err_count;
+
+
+/*
+ * Generic, controller-independent functions:
+ */
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, j;
+ struct irqaction * action;
+ unsigned long flags;
+
+ if (i == 0) {
+ seq_printf(p, " ");
+ for (j=0; j<NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "CPU%d ",j);
+ seq_putc(p, '\n');
+ }
+
+ if (i < NR_IRQS) {
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto skip;
+ seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+ for (j=0; j<NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ",
+ kstat_cpu(j).irqs[i]);
+#endif
+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
+
+ seq_printf(p, " %s", action->name);
+ for (action=action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+ seq_putc(p, '\n');
+skip:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ } else if (i == NR_IRQS) {
+ seq_printf(p, "NMI: ");
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
+ seq_putc(p, '\n');
+#ifdef CONFIG_X86_LOCAL_APIC
+ seq_printf(p, "LOC: ");
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs);
+ seq_putc(p, '\n');
+#endif
+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+#endif
+ }
+ return 0;
+}
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
+{
+ /* high bits used in ret_from_ code */
+ int irq = regs->orig_rax & __IRQ_MASK(HARDIRQ_BITS);
+
+ irq_enter();
+
+ __do_IRQ(irq, regs);
+ irq_exit();
+
+ return 1;
+}
+
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ldt.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ldt.c
new file mode 100644
index 0000000000..9111fb80d0
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ldt.c
@@ -0,0 +1,267 @@
+/*
+ * linux/arch/x86_64/kernel/ldt.c
+ *
+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2002 Andi Kleen
+ *
+ * This handles calls from both 32bit and 64bit mode.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/ldt.h>
+#include <asm/desc.h>
+#include <asm/proto.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
+static void flush_ldt(void *null)
+{
+ if (current->active_mm)
+ load_LDT(&current->active_mm->context);
+}
+#endif
+
+static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
+{
+ void *oldldt;
+ void *newldt;
+ unsigned oldsize;
+
+ if (mincount <= (unsigned)pc->size)
+ return 0;
+ oldsize = pc->size;
+ mincount = (mincount+511)&(~511);
+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
+ else
+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
+
+ if (!newldt)
+ return -ENOMEM;
+
+ if (oldsize)
+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
+ oldldt = pc->ldt;
+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
+ wmb();
+ pc->ldt = newldt;
+ wmb();
+ pc->size = mincount;
+ wmb();
+ if (reload) {
+#ifdef CONFIG_SMP
+ cpumask_t mask;
+
+ preempt_disable();
+#endif
+ make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ load_LDT(pc);
+#ifdef CONFIG_SMP
+ mask = cpumask_of_cpu(smp_processor_id());
+ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
+ smp_call_function(flush_ldt, NULL, 1, 1);
+ preempt_enable();
+#else
+ load_LDT(pc);
+#endif
+ }
+ if (oldsize) {
+ make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(oldldt);
+ else
+ kfree(oldldt);
+ }
+ return 0;
+}
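+
+/*
+ * Added annotation (worked example, not in the original patch,
+ * assuming LDT_ENTRY_SIZE is 8 and PAGE_SIZE is 4096): the rounding
+ * above grows the LDT in 512-entry steps, and the allocator switches
+ * strategy at one page:
+ * mincount = 1 -> 512 entries -> 512 * 8 = 4096 bytes -> kmalloc
+ * mincount = 513 -> 1024 entries -> 8192 bytes -> vmalloc
+ */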
+
+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+{
+ int err = alloc_ldt(new, old->size, 0);
+ if (err < 0)
+ return err;
+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+ make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ return 0;
+}
+
+/*
+ * we do not have to muck with descriptors here, that is
+ * done in switch_mm() as needed.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ struct mm_struct * old_mm;
+ int retval = 0;
+
+ init_MUTEX(&mm->context.sem);
+ mm->context.size = 0;
+ old_mm = current->mm;
+ if (old_mm && old_mm->context.size > 0) {
+ down(&old_mm->context.sem);
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ up(&old_mm->context.sem);
+ }
+ return retval;
+}
+
+/*
+ * Don't touch the LDT register - we're already in the next thread.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ if (mm->context.size) {
+ if (mm == current->active_mm)
+ clear_LDT();
+ make_pages_writable(mm->context.ldt,
+ (mm->context.size * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(mm->context.ldt);
+ else
+ kfree(mm->context.ldt);
+ mm->context.size = 0;
+ }
+}
+
+static int read_ldt(void __user * ptr, unsigned long bytecount)
+{
+ int err;
+ unsigned long size;
+ struct mm_struct * mm = current->mm;
+
+ if (!mm->context.size)
+ return 0;
+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+
+ down(&mm->context.sem);
+ size = mm->context.size*LDT_ENTRY_SIZE;
+ if (size > bytecount)
+ size = bytecount;
+
+ err = 0;
+ if (copy_to_user(ptr, mm->context.ldt, size))
+ err = -EFAULT;
+ up(&mm->context.sem);
+ if (err < 0)
+ goto error_return;
+ if (size != bytecount) {
+ /* zero-fill the rest */
+ if (clear_user(ptr+size, bytecount-size) != 0) {
+ err = -EFAULT;
+ goto error_return;
+ }
+ }
+ return bytecount;
+error_return:
+ return err;
+}
+
+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
+{
+ /* 128 is an arbitrary cap; the x86-64 default LDT is all zeros. */
+ if (bytecount > 128)
+ bytecount = 128;
+ if (clear_user(ptr, bytecount))
+ return -EFAULT;
+ return bytecount;
+}
+
+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
+{
+ struct task_struct *me = current;
+ struct mm_struct * mm = me->mm;
+ unsigned long entry = 0, *lp;
+ unsigned long mach_lp;
+ int error;
+ struct user_desc ldt_info;
+
+ error = -EINVAL;
+
+ if (bytecount != sizeof(ldt_info))
+ goto out;
+ error = -EFAULT;
+ if (copy_from_user(&ldt_info, ptr, bytecount))
+ goto out;
+
+ error = -EINVAL;
+ if (ldt_info.entry_number >= LDT_ENTRIES)
+ goto out;
+ if (ldt_info.contents == 3) {
+ if (oldmode)
+ goto out;
+ if (ldt_info.seg_not_present == 0)
+ goto out;
+ }
+
+ down(&mm->context.sem);
+ if (ldt_info.entry_number >= (unsigned)mm->context.size) {
+ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+ if (error < 0)
+ goto out_unlock;
+ }
+
+ lp = (unsigned long *)((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
+ mach_lp = arbitrary_virt_to_machine(lp);
+
+ /* Allow LDTs to be cleared by the user. */
+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+ if (oldmode || LDT_empty(&ldt_info)) {
+ entry = 0;
+ goto install;
+ }
+ }
+
+#if 0
+ entry = LDT_entry(&ldt_info);
+#endif
+ if (oldmode)
+ entry &= ~(1 << 20);
+
+ /* Install the new entry ... */
+install:
+ error = HYPERVISOR_update_descriptor(mach_lp, entry);
+
+out_unlock:
+ up(&mm->context.sem);
+out:
+ return error;
+}
+
+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+{
+ int ret = -ENOSYS;
+
+ switch (func) {
+ case 0:
+ ret = read_ldt(ptr, bytecount);
+ break;
+ case 1:
+ ret = write_ldt(ptr, bytecount, 1);
+ break;
+ case 2:
+ ret = read_default_ldt(ptr, bytecount);
+ break;
+ case 0x11:
+ ret = write_ldt(ptr, bytecount, 0);
+ break;
+ }
+ return ret;
+}
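+
+/*
+ * Hypothetical user-space sketch (an illustrative assumption, not part
+ * of this patch): installing one 32-bit data entry via the modify_ldt
+ * system call; func 1 reaches write_ldt() above in old mode. The slot
+ * number and 'base' argument are illustrative.
+ */
+#if 0
+#include <asm/ldt.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+static int install_ldt_entry(void *base)
+{
+ struct user_desc ud;
+
+ memset(&ud, 0, sizeof(ud));
+ ud.entry_number = 0; /* first LDT slot (illustrative) */
+ ud.base_addr = (unsigned long)base;
+ ud.limit = 0xfffff;
+ ud.seg_32bit = 1;
+ ud.limit_in_pages = 1;
+ return syscall(SYS_modify_ldt, 1, &ud, sizeof(ud));
+}
+#endif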
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c
new file mode 100644
index 0000000000..70a2a38b7e
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c
@@ -0,0 +1,256 @@
+/*
+ * Dynamic DMA mapping support.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm-xen/balloon.h>
+
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+ for (i = 0; i < nents; i++ ) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+ s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ s->dma_length = s->length;
+ }
+ return nents;
+}
+
+EXPORT_SYMBOL(dma_map_sg);
+
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int dir)
+{
+ int i;
+ for (i = 0; i < nents; i++) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(s->page == NULL);
+ BUG_ON(s->dma_address == 0);
+ dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
+ }
+}
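+
+/*
+ * Illustrative caller sketch (an assumption, not part of this patch):
+ * mapping two kernel buffers through a scatterlist and reading the
+ * bus addresses back. 'dev', 'buf0'/'buf1', the lengths and
+ * use_bus_range() are hypothetical.
+ */
+#if 0
+ struct scatterlist sg[2];
+ int i, n;
+
+ sg[0].page = virt_to_page(buf0);
+ sg[0].offset = offset_in_page(buf0);
+ sg[0].length = len0;
+ sg[1].page = virt_to_page(buf1);
+ sg[1].offset = offset_in_page(buf1);
+ sg[1].length = len1;
+
+ n = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
+ for (i = 0; i < n; i++)
+ use_bus_range(sg_dma_address(&sg[i]), sg_dma_length(&sg[i]));
+ dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
+#endif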
+
+struct dma_coherent_mem {
+ void *virt_base;
+ u32 device_base;
+ int size;
+ int flags;
+ unsigned long *bitmap;
+};
+
+static void
+xen_contig_memory(unsigned long vstart, unsigned int order)
+{
+ /*
+ * Ensure multi-page extents are contiguous in machine memory.
+ * This code could be cleaned up some, and the number of
+ * hypercalls reduced.
+ */
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long pfn, i, flags;
+
+ scrub_pages(vstart, 1 << order);
+
+ balloon_lock(flags);
+
+ /* 1. Zap current PTEs, giving away the underlying pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ pfn = pte->pte >> PAGE_SHIFT;
+ xen_l1_entry_update(pte, 0);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+ (u32)INVALID_P2M_ENTRY;
+ if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ &pfn, 1, 0) != 1) BUG();
+ }
+ /* 2. Get a new contiguous memory extent. */
+ if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+ &pfn, 1, order) != 1) BUG();
+ /* 3. Map the new extent in place of old pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ xen_l1_entry_update(
+ pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
+ xen_machphys_update(
+ pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+ pfn+i;
+ }
+ /* Flush updates through and flush the TLB. */
+ xen_tlb_flush();
+
+ balloon_unlock(flags);
+}
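+
+/*
+ * Added annotation (summary of the exchange above, not in the original
+ * patch):
+ * 1. decrease_reservation hands every machine page backing the extent
+ * back to Xen, after its PTE has been zapped.
+ * 2. increase_reservation with 'order' returns one machine-contiguous
+ * run of 2^order pages starting at machine frame 'pfn'.
+ * 3. The PTEs, the machphys table and phys_to_machine_mapping are
+ * rewired so the same virtual/pseudo-physical range now uses the
+ * new run.
+ */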
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, unsigned gfp)
+{
+ void *ret;
+ unsigned int order = get_order(size);
+ unsigned long vstart;
+
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (mem) {
+ int page = bitmap_find_free_region(mem->bitmap, mem->size,
+ order);
+ if (page >= 0) {
+ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
+ ret = mem->virt_base + (page << PAGE_SHIFT);
+ memset(ret, 0, size);
+ return ret;
+ }
+ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+ return NULL;
+ }
+
+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+
+ vstart = __get_free_pages(gfp, order);
+ ret = (void *)vstart;
+ if (ret == NULL)
+ return ret;
+
+ xen_contig_memory(vstart, order);
+
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+ int order = get_order(size);
+
+ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+ bitmap_release_region(mem->bitmap, page, order);
+ } else
+ free_pages((unsigned long)vaddr, order);
+}
+EXPORT_SYMBOL(dma_free_coherent);
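+
+/*
+ * Illustrative caller sketch (an assumption, not part of this patch):
+ * a driver allocating one page of coherent DMA memory; 'dev' is
+ * hypothetical, the device addresses the buffer through 'bus' and the
+ * CPU through 'cpu'.
+ */
+#if 0
+ dma_addr_t bus;
+ void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
+
+ if (cpu != NULL) {
+ /* ... program the device with 'bus' ... */
+ dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
+ }
+#endif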
+
+#if 0
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+ dma_addr_t device_addr, size_t size, int flags)
+{
+ void __iomem *mem_base;
+ int pages = size >> PAGE_SHIFT;
+ int bitmap_size = (pages + 31)/32;
+
+ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+ goto out;
+ if (!size)
+ goto out;
+ if (dev->dma_mem)
+ goto out;
+
+ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+ mem_base = ioremap(bus_addr, size);
+ if (!mem_base)
+ goto out;
+
+ dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+ if (!dev->dma_mem)
+ goto out;
+ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
+ dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
+ if (!dev->dma_mem->bitmap)
+ goto free1_out;
+ memset(dev->dma_mem->bitmap, 0, bitmap_size);
+
+ dev->dma_mem->virt_base = mem_base;
+ dev->dma_mem->device_base = device_addr;
+ dev->dma_mem->size = pages;
+ dev->dma_mem->flags = flags;
+
+ if (flags & DMA_MEMORY_MAP)
+ return DMA_MEMORY_MAP;
+
+ return DMA_MEMORY_IO;
+
+ free1_out:
+ kfree(dev->dma_mem->bitmap);
+ out:
+ return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+
+ if(!mem)
+ return;
+ dev->dma_mem = NULL;
+ iounmap(mem->virt_base);
+ kfree(mem->bitmap);
+ kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int pos, err;
+
+ if (!mem)
+ return ERR_PTR(-EINVAL);
+
+ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+ if (err != 0)
+ return ERR_PTR(err);
+ return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+#endif
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-nommu.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-nommu.c
new file mode 100644
index 0000000000..b359ae80bb
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-nommu.c
@@ -0,0 +1,96 @@
+/* Fallback functions when the main IOMMU code is not compiled in. This
+ code is roughly equivalent to i386. */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <asm/proto.h>
+#include <asm/processor.h>
+
+int iommu_merge = 0;
+EXPORT_SYMBOL(iommu_merge);
+
+dma_addr_t bad_dma_address;
+EXPORT_SYMBOL(bad_dma_address);
+
+int iommu_bio_merge = 0;
+EXPORT_SYMBOL(iommu_bio_merge);
+
+int iommu_sac_force = 0;
+EXPORT_SYMBOL(iommu_sac_force);
+
+#if 0
+/*
+ * Dummy IO MMU functions
+ */
+
+void *dma_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, unsigned gfp)
+{
+ void *ret;
+ u64 mask;
+ int order = get_order(size);
+
+ if (hwdev)
+ mask = hwdev->coherent_dma_mask & *hwdev->dma_mask;
+ else
+ mask = 0xffffffff;
+ for (;;) {
+ ret = (void *)__get_free_pages(gfp, order);
+ if (ret == NULL)
+ return NULL;
+ *dma_handle = virt_to_bus(ret);
+ if ((*dma_handle & ~mask) == 0)
+ break;
+ free_pages((unsigned long)ret, order);
+ if (gfp & GFP_DMA)
+ return NULL;
+ gfp |= GFP_DMA;
+ }
+
+ memset(ret, 0, size);
+ return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+EXPORT_SYMBOL(dma_free_coherent);
+#endif
+
+int dma_supported(struct device *hwdev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ * RED-PEN this won't work for pci_map_single. Caller has to
+ * use GFP_DMA in the first place.
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_get_cache_alignment(void)
+{
+ return boot_cpu_data.x86_clflush_size;
+}
+EXPORT_SYMBOL(dma_get_cache_alignment);
+
+static int __init check_ram(void)
+{
+ if (end_pfn >= 0xffffffff>>PAGE_SHIFT) {
+ printk(
+ KERN_ERR "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
+ KERN_ERR "WARNING 32bit PCI may malfunction.\n");
+ }
+ return 0;
+}
+__initcall(check_ram);
+
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c
new file mode 100644
index 0000000000..263176f6b5
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c
@@ -0,0 +1,713 @@
+/*
+ * linux/arch/x86-64/kernel/process.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * X86-64 port
+ * Andi Kleen.
+ *
+ * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/elfcore.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/module.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/ptrace.h>
+#include <linux/utsname.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/mmu_context.h>
+#include <asm/pda.h>
+#include <asm/prctl.h>
+#include <asm/kdebug.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+#include <asm-xen/xen-public/physdev.h>
+#include <asm/desc.h>
+#include <asm/proto.h>
+#include <asm/hardirq.h>
+#include <asm/ia32.h>
+
+asmlinkage extern void ret_from_fork(void);
+
+unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
+
+atomic_t hlt_counter = ATOMIC_INIT(0);
+
+unsigned long boot_option_idle_override = 0;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+/*
+ * Power management idle function, if any.
+ */
+void (*pm_idle)(void);
+static cpumask_t cpu_idle_map;
+
+void disable_hlt(void)
+{
+ atomic_inc(&hlt_counter);
+}
+
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+ atomic_dec(&hlt_counter);
+}
+
+EXPORT_SYMBOL(enable_hlt);
+
+/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
+extern int set_timeout_timer(void);
+void xen_idle(void)
+{
+ int cpu;
+
+ local_irq_disable();
+
+ cpu = smp_processor_id();
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, 0);
+
+ if (need_resched()) {
+ local_irq_enable();
+ } else if (set_timeout_timer() == 0) {
+ /* NB. Blocking reenable events in a race-free manner. */
+ HYPERVISOR_block();
+ } else {
+ local_irq_enable();
+ HYPERVISOR_yield();
+ }
+}
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle (void)
+{
+ int cpu = smp_processor_id();
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ while (!need_resched()) {
+ if (cpu_isset(cpu, cpu_idle_map))
+ cpu_clear(cpu, cpu_idle_map);
+ rmb();
+
+ __IRQ_STAT(cpu,idle_timestamp) = jiffies;
+ xen_idle();
+ }
+ schedule();
+ }
+}
+
+/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
+/* Always use xen_idle() instead. */
+void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
+
+/* Prints also some state that isn't saved in the pt_regs */
+void __show_regs(struct pt_regs * regs)
+{
+ unsigned long fs, gs, shadowgs;
+ unsigned int fsindex,gsindex;
+ unsigned int ds,cs,es;
+
+ printk("\n");
+ print_modules();
+ printk("Pid: %d, comm: %.20s %s %s\n",
+ current->pid, current->comm, print_tainted(), system_utsname.release);
+ printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
+ printk_address(regs->rip);
+ printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
+ printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
+ regs->rax, regs->rbx, regs->rcx);
+ printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
+ regs->rdx, regs->rsi, regs->rdi);
+ printk("RBP: %016lx R08: %016lx R09: %016lx\n",
+ regs->rbp, regs->r8, regs->r9);
+ printk("R10: %016lx R11: %016lx R12: %016lx\n",
+ regs->r10, regs->r11, regs->r12);
+ printk("R13: %016lx R14: %016lx R15: %016lx\n",
+ regs->r13, regs->r14, regs->r15);
+
+ asm("movl %%ds,%0" : "=r" (ds));
+ asm("movl %%cs,%0" : "=r" (cs));
+ asm("movl %%es,%0" : "=r" (es));
+ asm("movl %%fs,%0" : "=r" (fsindex));
+ asm("movl %%gs,%0" : "=r" (gsindex));
+
+ rdmsrl(MSR_FS_BASE, fs);
+ rdmsrl(MSR_GS_BASE, gs);
+ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+
+ printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+ fs,fsindex,gs,gsindex,shadowgs);
+ printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es);
+
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ __show_regs(regs);
+ show_trace(&regs->rsp);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ struct task_struct *me = current;
+ struct thread_struct *t = &me->thread;
+ if (me->thread.io_bitmap_ptr) {
+ struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+
+ kfree(t->io_bitmap_ptr);
+ t->io_bitmap_ptr = NULL;
+ /*
+ * Careful, clear this in the TSS too:
+ */
+ memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
+ t->io_bitmap_max = 0;
+ put_cpu();
+ }
+}
+
+void load_gs_index(unsigned gs)
+{
+ HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs);
+}
+
+void flush_thread(void)
+{
+ struct task_struct *tsk = current;
+ struct thread_info *t = current_thread_info();
+
+ if (t->flags & _TIF_ABI_PENDING)
+ t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
+
+ tsk->thread.debugreg0 = 0;
+ tsk->thread.debugreg1 = 0;
+ tsk->thread.debugreg2 = 0;
+ tsk->thread.debugreg3 = 0;
+ tsk->thread.debugreg6 = 0;
+ tsk->thread.debugreg7 = 0;
+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+ /*
+ * Forget coprocessor state..
+ */
+ clear_fpu(tsk);
+ clear_used_math();
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+ if (dead_task->mm) {
+ if (dead_task->mm->context.size) {
+ printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
+ dead_task->comm,
+ dead_task->mm->context.ldt,
+ dead_task->mm->context.size);
+ BUG();
+ }
+ }
+}
+
+static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
+{
+ struct user_desc ud = {
+ .base_addr = addr,
+ .limit = 0xfffff,
+ .contents = (3 << 3), /* user */
+ .seg_32bit = 1,
+ .limit_in_pages = 1,
+ .useable = 1,
+ };
+ struct n_desc_struct *desc = (void *)t->thread.tls_array;
+ desc += tls;
+ desc->a = LDT_entry_a(&ud);
+ desc->b = LDT_entry_b(&ud);
+}
+
+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
+{
+ struct desc_struct *desc = (void *)t->thread.tls_array;
+ desc += tls;
+ return desc->base0 |
+ (((u32)desc->base1) << 16) |
+ (((u32)desc->base2) << 24);
+}
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+ unlazy_fpu(tsk);
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ int err;
+ struct pt_regs * childregs;
+ struct task_struct *me = current;
+
+ childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+
+ *childregs = *regs;
+
+ childregs->rax = 0;
+ childregs->rsp = rsp;
+ if (rsp == ~0UL) {
+ childregs->rsp = (unsigned long)childregs;
+ }
+
+ p->thread.rsp = (unsigned long) childregs;
+ p->thread.rsp0 = (unsigned long) (childregs+1);
+ p->thread.userrsp = me->thread.userrsp;
+
+ set_ti_thread_flag(p->thread_info, TIF_FORK);
+
+ p->thread.fs = me->thread.fs;
+ p->thread.gs = me->thread.gs;
+
+ asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
+ asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
+ asm("movl %%es,%0" : "=m" (p->thread.es));
+ asm("movl %%ds,%0" : "=m" (p->thread.ds));
+
+ if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+ if (!p->thread.io_bitmap_ptr) {
+ p->thread.io_bitmap_max = 0;
+ return -ENOMEM;
+ }
+ memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
+ }
+
+ /*
+ * Set a new TLS for the child thread?
+ */
+ if (clone_flags & CLONE_SETTLS) {
+#ifdef CONFIG_IA32_EMULATION
+ if (test_thread_flag(TIF_IA32))
+ err = ia32_child_tls(p, childregs);
+ else
+#endif
+ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
+ if (err)
+ goto out;
+ }
+ p->thread.io_pl = current->thread.io_pl;
+
+ err = 0;
+out:
+ if (err && p->thread.io_bitmap_ptr) {
+ kfree(p->thread.io_bitmap_ptr);
+ p->thread.io_bitmap_max = 0;
+ }
+ return err;
+}
+
+/*
+ * This special macro can be used to load a debugging register
+ */
+#define loaddebug(thread,register) \
+ HYPERVISOR_set_debugreg((register), \
+ (thread->debugreg ## register))
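+
+/*
+ * Added annotation: e.g. loaddebug(next, 7) pastes the register number
+ * into the field name and expands to
+ * HYPERVISOR_set_debugreg(7, next->debugreg7);
+ */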
+
+
+static inline void __save_init_fpu( struct task_struct *tsk )
+{
+ asm volatile( "rex64 ; fxsave %0 ; fnclex"
+ : "=m" (tsk->thread.i387.fxsave));
+ tsk->thread_info->status &= ~TS_USEDFPU;
+}
+
+/*
+ * switch_to(x,y) should switch tasks from x to y.
+ *
+ * This could still be optimized:
+ * - fold all the options into a flag word and test it with a single test.
+ * - could test fs/gs bitsliced
+ */
+struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+{
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ physdev_op_t iopl_op, iobmp_op;
+ multicall_entry_t _mcl[8], *mcl = _mcl;
+
+ /*
+ * This is basically '__unlazy_fpu', except that we queue a
+ * multicall to indicate FPU task switch, rather than
+ * synchronously trapping to Xen.
+ */
+ if (prev_p->thread_info->status & TS_USEDFPU) {
+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
+ mcl->op = __HYPERVISOR_fpu_taskswitch;
+ mcl->args[0] = 1;
+ mcl++;
+ }
+
+ /*
+ * Reload esp0, LDT and the page table pointer:
+ */
+ tss->rsp0 = next->rsp0;
+ mcl->op = __HYPERVISOR_stack_switch;
+ mcl->args[0] = __KERNEL_DS;
+ mcl->args[1] = tss->rsp0;
+ mcl++;
+
+ /*
+ * Load the per-thread Thread-Local Storage descriptor.
+ * This is load_TLS(next, cpu) with multicalls.
+ */
+#define C(i) do { \
+ if (unlikely(next->tls_array[i] != prev->tls_array[i])) { \
+ mcl->op = __HYPERVISOR_update_descriptor; \
+ mcl->args[0] = virt_to_machine(&get_cpu_gdt_table(cpu) \
+ [GDT_ENTRY_TLS_MIN + i]); \
+ mcl->args[1] = next->tls_array[i]; \
+ mcl++; \
+ } \
+} while (0)
+ C(0); C(1); C(2);
+#undef C
+
+ if (unlikely(prev->io_pl != next->io_pl)) {
+ iopl_op.cmd = PHYSDEVOP_SET_IOPL;
+ iopl_op.u.set_iopl.iopl = next->io_pl;
+ mcl->op = __HYPERVISOR_physdev_op;
+ mcl->args[0] = (unsigned long)&iopl_op;
+ mcl++;
+ }
+
+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
+ iobmp_op.cmd = PHYSDEVOP_SET_IOBITMAP;
+ iobmp_op.u.set_iobitmap.bitmap =
+ (unsigned long)next->io_bitmap_ptr;
+ iobmp_op.u.set_iobitmap.nr_ports =
+ next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
+ mcl->op = __HYPERVISOR_physdev_op;
+ mcl->args[0] = (unsigned long)&iobmp_op;
+ mcl++;
+ }
+
+ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
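+ /*
+ * Added annotation: at most 7 entries are queued above (1 FPU
+ * task switch + 1 stack switch + 3 TLS slots + 1 IOPL + 1 I/O
+ * bitmap), so the _mcl[8] scratch array can never overflow.
+ */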
+ /*
+ * Switch DS and ES.
+ * This won't pick up thread selector changes, but I guess that is ok.
+ */
+ asm volatile("movl %%es,%0" : "=m" (prev->es));
+ if (unlikely(next->es | prev->es))
+ loadsegment(es, next->es);
+
+ asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
+ if (unlikely(next->ds | prev->ds))
+ loadsegment(ds, next->ds);
+
+ /*
+ * Switch FS and GS.
+ */
+ {
+ unsigned fsindex;
+ asm volatile("movl %%fs,%0" : "=g" (fsindex));
+ /* A non-zero segment register always requires a reload;
+ also reload when it has changed. When the previous
+ process used a 64-bit base, always reload to avoid an
+ information leak. */
+ if (unlikely(fsindex | next->fsindex | prev->fs)) {
+ loadsegment(fs, next->fsindex);
+ /* Check whether the user used a selector != 0; if so,
+ * clear the 64-bit base, since the overloaded base is
+ * always mapped to the null selector.
+ */
+ if (fsindex)
+ prev->fs = 0;
+ }
+ /* when next process has a 64bit base use it */
+ if (next->fs) {
+ HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs);
+ }
+ prev->fsindex = fsindex;
+ }
+ {
+ unsigned gsindex;
+ asm volatile("movl %%gs,%0" : "=g" (gsindex));
+ if (unlikely(gsindex | next->gsindex | prev->gs)) {
+ load_gs_index(next->gsindex);
+ if (gsindex)
+ prev->gs = 0;
+ }
+ if (next->gs)
+ HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs);
+ prev->gsindex = gsindex;
+ }
+
+ /*
+ * Switch the PDA context.
+ */
+ prev->userrsp = read_pda(oldrsp);
+ write_pda(oldrsp, next->userrsp);
+ write_pda(pcurrent, next_p);
+ write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
+
+ /*
+ * Now maybe reload the debug registers
+ */
+ if (unlikely(next->debugreg7)) {
+ loaddebug(next, 0);
+ loaddebug(next, 1);
+ loaddebug(next, 2);
+ loaddebug(next, 3);
+ /* no 4 and 5 */
+ loaddebug(next, 6);
+ loaddebug(next, 7);
+ }
+
+ return prev_p;
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage
+long sys_execve(char __user *name, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs regs)
+{
+ long error;
+ char * filename;
+
+ filename = getname(name);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ return error;
+ error = do_execve(filename, argv, envp, &regs);
+ if (error == 0) {
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ }
+ putname(filename);
+ return error;
+}
+
+void set_personality_64bit(void)
+{
+ /* inherit personality from parent */
+
+ /* Make sure to be in 64bit mode */
+ clear_thread_flag(TIF_IA32);
+
+ /* TBD: overwrites user setup. Should have two bits.
+ But 64-bit processes have always behaved this way,
+ so it's not too bad. The main problem is just that
+ 32-bit children are affected again. */
+ current->personality &= ~READ_IMPLIES_EXEC;
+}
+
+asmlinkage long sys_fork(struct pt_regs *regs)
+{
+ return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
+}
+
+asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
+{
+ if (!newsp)
+ newsp = regs->rsp;
+ return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage long sys_vfork(struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
+ NULL, NULL);
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long stack;
+ u64 fp,rip;
+ int count = 0;
+
+ if (!p || p == current || p->state==TASK_RUNNING)
+ return 0;
+ stack = (unsigned long)p->thread_info;
+ if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
+ return 0;
+ fp = *(u64 *)(p->thread.rsp);
+ do {
+ if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
+ return 0;
+ rip = *(u64 *)(fp+8);
+ if (!in_sched_functions(rip))
+ return rip;
+ fp = *(u64 *)fp;
+ } while (count++ < 16);
+ return 0;
+}
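+
+/*
+ * Added annotation (assumes frame pointers): each stack frame is laid
+ * out as [saved rbp][return rip], so the walk above reads rip at fp+8
+ * and follows *fp to the caller's frame, giving up after 16 hops or
+ * when fp leaves the thread stack.
+ */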
+
+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
+{
+ int ret = 0;
+ int doit = task == current;
+ int cpu;
+
+ switch (code) {
+ case ARCH_SET_GS:
+ if (addr >= TASK_SIZE)
+ return -EPERM;
+ cpu = get_cpu();
+ /* handle small bases via the GDT because that's faster to
+ switch. */
+ if (addr <= 0xffffffff) {
+ set_32bit_tls(task, GS_TLS, addr);
+ if (doit) {
+ load_TLS(&task->thread, cpu);
+ load_gs_index(GS_TLS_SEL);
+ }
+ task->thread.gsindex = GS_TLS_SEL;
+ task->thread.gs = 0;
+ } else {
+ task->thread.gsindex = 0;
+ task->thread.gs = addr;
+ if (doit) {
+ load_gs_index(0);
+ ret = HYPERVISOR_set_segment_base(SEGBASE_GS_USER, addr);
+ printk("do_arch_prctl: SET_SET: addr = %lx\n", addr);
+ }
+ }
+ put_cpu();
+ break;
+ case ARCH_SET_FS:
+ /* Not strictly needed for fs, but do it for symmetry
+ with gs */
+ if (addr >= TASK_SIZE)
+ return -EPERM;
+ cpu = get_cpu();
+ /* handle small bases via the GDT because that's faster to
+ switch. */
+ if (addr <= 0xffffffff) {
+ set_32bit_tls(task, FS_TLS, addr);
+ if (doit) {
+ load_TLS(&task->thread, cpu);
+ asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
+ }
+ task->thread.fsindex = FS_TLS_SEL;
+ task->thread.fs = 0;
+ } else {
+ task->thread.fsindex = 0;
+ task->thread.fs = addr;
+ if (doit) {
+ /* set the selector to 0 to not confuse
+ __switch_to */
+ asm volatile("movl %0,%%fs" :: "r" (0));
+ ret = HYPERVISOR_set_segment_base(SEGBASE_FS, addr);
+
+ }
+ }
+ put_cpu();
+ break;
+ case ARCH_GET_FS: {
+ unsigned long base;
+ if (task->thread.fsindex == FS_TLS_SEL)
+ base = read_32bit_tls(task, FS_TLS);
+ else if (doit) {
+ rdmsrl(MSR_FS_BASE, base);
+ printk("do_arch_prctl: GET_FS: addr = %lx\n", addr);
+ } else
+ base = task->thread.fs;
+ ret = put_user(base, (unsigned long __user *)addr);
+ break;
+ }
+ case ARCH_GET_GS: {
+ unsigned long base;
+ if (task->thread.gsindex == GS_TLS_SEL)
+ base = read_32bit_tls(task, GS_TLS);
+ else if (doit) {
+ rdmsrl(MSR_KERNEL_GS_BASE, base);
+ printk("do_arch_prctl: GET_GS: addr = %lx\n", addr);
+ } else
+ base = task->thread.gs;
+ ret = put_user(base, (unsigned long __user *)addr);
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+long sys_arch_prctl(int code, unsigned long addr)
+{
+ return do_arch_prctl(current, code, addr);
+}
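+
+/*
+ * Hypothetical user-space sketch (an illustrative assumption, not part
+ * of this patch): pointing the FS base at a thread-local block via
+ * arch_prctl, which lands in do_arch_prctl() above; 'tls_block' is
+ * hypothetical.
+ */
+#if 0
+#include <asm/prctl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+static int set_fs_base(void *tls_block)
+{
+ return syscall(SYS_arch_prctl, ARCH_SET_FS,
+ (unsigned long)tls_block);
+}
+#endif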
+
+/*
+ * Capture the user space registers if the task is not running (in user space)
+ */
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+{
+ struct pt_regs *pp, ptregs;
+
+ pp = (struct pt_regs *)(tsk->thread.rsp0);
+ --pp;
+
+ ptregs = *pp;
+ ptregs.cs &= 0xffff;
+ ptregs.ss &= 0xffff;
+
+ elf_core_copy_regs(regs, &ptregs);
+
+ boot_option_idle_override = 1;
+ return 1;
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup.c
new file mode 100644
index 0000000000..d8777f7fd8
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup.c
@@ -0,0 +1,1379 @@
+/*
+ * linux/arch/x86-64/kernel/setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Nov 2001 Dave Jones <davej@suse.de>
+ * Forked from i386 setup code.
+ *
+ * $Id$
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <asm/processor.h>
+#include <linux/console.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/kallsyms.h>
+#include <linux/edd.h>
+#include <linux/percpu.h>
+#include <asm/mtrr.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/msr.h>
+#include <asm/desc.h>
+#include <video/edid.h>
+#include <asm/e820.h>
+#include <asm/dma.h>
+#include <asm/mpspec.h>
+#include <asm/mmu_context.h>
+#include <asm/bootsetup.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/mach_apic.h>
+#include <asm/numa.h>
+#include <asm-xen/xen-public/physdev.h>
+#include "setup_arch_pre.h"
+#include <asm-xen/hypervisor.h>
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+
+#include <asm/mach-xen/setup_arch_post.h>
+
+extern unsigned long start_pfn;
+
+#if 0
+struct edid_info {
+ unsigned char dummy[128];
+};
+#endif
+
+extern struct edid_info edid_info;
+
+/* Allows setting of maximum possible memory size */
+unsigned long xen_override_max_pfn;
+/*
+ * Machine setup..
+ */
+
+struct cpuinfo_x86 boot_cpu_data;
+
+unsigned long mmu_cr4_features;
+EXPORT_SYMBOL_GPL(mmu_cr4_features);
+
+int acpi_disabled;
+EXPORT_SYMBOL(acpi_disabled);
+#ifdef CONFIG_ACPI_BOOT
+extern int __initdata acpi_ht;
+extern acpi_interrupt_flags acpi_sci_flags;
+int __initdata acpi_force = 0;
+#endif
+
+int acpi_numa __initdata;
+
+/* For PCI or other memory-mapped resources */
+unsigned long pci_mem_start = 0x10000000;
+
+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
+int bootloader_type;
+
+unsigned long saved_video_mode;
+
+#ifdef CONFIG_SWIOTLB
+int swiotlb;
+EXPORT_SYMBOL(swiotlb);
+#endif
+
+/*
+ * Setup options
+ */
+struct drive_info_struct { char dummy[32]; } drive_info;
+struct screen_info screen_info;
+struct sys_desc_table_struct {
+ unsigned short length;
+ unsigned char table[0];
+};
+
+struct edid_info edid_info;
+struct e820map e820;
+
+unsigned char aux_device_present;
+
+extern int root_mountflags;
+extern char _text, _etext, _edata, _end;
+
+char command_line[COMMAND_LINE_SIZE];
+
+struct resource standard_io_resources[] = {
+ { .name = "dma1", .start = 0x00, .end = 0x1f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "pic1", .start = 0x20, .end = 0x21,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "timer0", .start = 0x40, .end = 0x43,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "timer1", .start = 0x50, .end = 0x53,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "keyboard", .start = 0x60, .end = 0x6f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "dma page reg", .start = 0x80, .end = 0x8f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "pic2", .start = 0xa0, .end = 0xa1,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "dma2", .start = 0xc0, .end = 0xdf,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "fpu", .start = 0xf0, .end = 0xff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO }
+};
+
+#define STANDARD_IO_RESOURCES \
+ (sizeof standard_io_resources / sizeof standard_io_resources[0])
+
+#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
+
+struct resource data_resource = {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_RAM,
+};
+struct resource code_resource = {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_RAM,
+};
+
+#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static struct resource system_rom_resource = {
+ .name = "System ROM",
+ .start = 0xf0000,
+ .end = 0xfffff,
+ .flags = IORESOURCE_ROM,
+};
+
+static struct resource extension_rom_resource = {
+ .name = "Extension ROM",
+ .start = 0xe0000,
+ .end = 0xeffff,
+ .flags = IORESOURCE_ROM,
+};
+
+static struct resource adapter_rom_resources[] = {
+ { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM }
+};
+#endif
+
+#define ADAPTER_ROM_RESOURCES \
+ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
+
+static struct resource video_rom_resource = {
+ .name = "Video ROM",
+ .start = 0xc0000,
+ .end = 0xc7fff,
+ .flags = IORESOURCE_ROM,
+};
+
+static struct resource video_ram_resource = {
+ .name = "Video RAM area",
+ .start = 0xa0000,
+ .end = 0xbffff,
+ .flags = IORESOURCE_RAM,
+};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+
+static int __init romchecksum(unsigned char *rom, unsigned long length)
+{
+ unsigned char *p, sum = 0;
+
+ for (p = rom; p < rom + length; p++)
+ sum += *p;
+ return sum == 0;
+}
+
+static void __init probe_roms(void)
+{
+ unsigned long start, length, upper;
+ unsigned char *rom;
+ int i;
+
+ /* video rom */
+ upper = adapter_rom_resources[0].start;
+ for (start = video_rom_resource.start; start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ video_rom_resource.start = start;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* if checksum okay, trust length byte */
+ if (length && romchecksum(rom, length))
+ video_rom_resource.end = start + length - 1;
+
+ request_resource(&iomem_resource, &video_rom_resource);
+ break;
+ }
+
+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+ if (start < upper)
+ start = upper;
+
+ /* system rom */
+ request_resource(&iomem_resource, &system_rom_resource);
+ upper = system_rom_resource.start;
+
+ /* check for extension rom (ignore length byte!) */
+ rom = isa_bus_to_virt(extension_rom_resource.start);
+ if (romsignature(rom)) {
+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
+ if (romchecksum(rom, length)) {
+ request_resource(&iomem_resource, &extension_rom_resource);
+ upper = extension_rom_resource.start;
+ }
+ }
+
+ /* check for adapter roms on 2k boundaries */
+ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* but accept any length that fits if checksum okay */
+ if (!length || start + length > upper || !romchecksum(rom, length))
+ continue;
+
+ adapter_rom_resources[i].start = start;
+ adapter_rom_resources[i].end = start + length - 1;
+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
+
+ start = adapter_rom_resources[i++].end & ~2047UL;
+ }
+}
+#endif
+
+/*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+EXPORT_SYMBOL(HYPERVISOR_shared_info);
+
+u32 *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+
+EXPORT_SYMBOL(phys_to_machine_mapping);
+
+DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
+DEFINE_PER_CPU(int, nr_multicall_ents);
+
+/* Raw start-of-day parameters from the hypervisor. */
+union xen_start_info_union xen_start_info_union;
+
+static __init void parse_cmdline_early (char ** cmdline_p)
+{
+ char c = ' ', *to = command_line, *from = COMMAND_LINE;
+ int len = 0;
+
+ memcpy(saved_command_line, xen_start_info.cmd_line, MAX_CMDLINE);
+ /* Save unparsed command line copy for /proc/cmdline */
+ memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+ for (;;) {
+ if (c != ' ')
+ goto next_char;
+
+#ifdef CONFIG_SMP
+ /*
+ * If the BIOS enumerates physical processors before logical,
+ * maxcpus=N at enumeration-time can be used to disable HT.
+ */
+ else if (!memcmp(from, "maxcpus=", 8)) {
+ extern unsigned int maxcpus;
+
+ maxcpus = simple_strtoul(from + 8, NULL, 0);
+ }
+#endif
+#ifdef CONFIG_ACPI_BOOT
+ /* "acpi=off" disables both ACPI table parsing and interpreter init */
+ if (!memcmp(from, "acpi=off", 8))
+ disable_acpi();
+
+ if (!memcmp(from, "acpi=force", 10)) {
+ /* add later when we do DMI horrors: */
+ acpi_force = 1;
+ acpi_disabled = 0;
+ }
+
+ /* acpi=ht just means: do ACPI MADT parsing
+ at bootup, but don't enable the full ACPI interpreter */
+ if (!memcmp(from, "acpi=ht", 7)) {
+ if (!acpi_force)
+ disable_acpi();
+ acpi_ht = 1;
+ }
+ else if (!memcmp(from, "pci=noacpi", 10))
+ acpi_disable_pci();
+ else if (!memcmp(from, "acpi=noirq", 10))
+ acpi_noirq_set();
+
+ else if (!memcmp(from, "acpi_sci=edge", 13))
+ acpi_sci_flags.trigger = 1;
+ else if (!memcmp(from, "acpi_sci=level", 14))
+ acpi_sci_flags.trigger = 3;
+ else if (!memcmp(from, "acpi_sci=high", 13))
+ acpi_sci_flags.polarity = 1;
+ else if (!memcmp(from, "acpi_sci=low", 12))
+ acpi_sci_flags.polarity = 3;
+
+ /* acpi=strict disables out-of-spec workarounds */
+ else if (!memcmp(from, "acpi=strict", 11)) {
+ acpi_strict = 1;
+ }
+#endif
+
+#if 0
+ if (!memcmp(from, "nolapic", 7) ||
+ !memcmp(from, "disableapic", 11))
+ disable_apic = 1;
+
+ if (!memcmp(from, "noapic", 6))
+ skip_ioapic_setup = 1;
+
+ if (!memcmp(from, "apic", 4)) {
+ skip_ioapic_setup = 0;
+ ioapic_force = 1;
+ }
+#endif
+
+ if (!memcmp(from, "mem=", 4))
+ parse_memopt(from+4, &from);
+
+#ifdef CONFIG_DISCONTIGMEM
+ if (!memcmp(from, "numa=", 5))
+ numa_setup(from+5);
+#endif
+
+#ifdef CONFIG_GART_IOMMU
+ if (!memcmp(from,"iommu=",6)) {
+ iommu_setup(from+6);
+ }
+#endif
+
+ if (!memcmp(from,"oops=panic", 10))
+ panic_on_oops = 1;
+
+ if (!memcmp(from, "noexec=", 7))
+ nonx_setup(from + 7);
+
+ next_char:
+ c = *(from++);
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *(to++) = c;
+ }
+ *to = '\0';
+ *cmdline_p = command_line;
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+static void __init contig_initmem_init(void)
+{
+ unsigned long bootmap_size, bootmap;
+
+ /*
+ * partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+
+ bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
+ bootmap = start_pfn;
+ bootmap_size = init_bootmem(bootmap, end_pfn);
+ reserve_bootmem(bootmap, bootmap_size);
+
+ free_bootmem(start_pfn << PAGE_SHIFT, (end_pfn - start_pfn) << PAGE_SHIFT);
+ printk("Registering memory for bootmem: from %lx, size = %lx\n",
+ start_pfn << PAGE_SHIFT, (end_pfn - start_pfn) << PAGE_SHIFT);
+ /*
+ * This should cover kernel_end
+ */
+#if 0
+ reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
+ bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
+#endif
+ reserve_bootmem(0, (PFN_PHYS(start_pfn) +
+ bootmap_size + PAGE_SIZE-1));
+
+}
+#endif
+
+/* Use inline assembly to define this, because the nops are defined
+ as inline assembly strings in the include files and we cannot
+ easily get them into C string literals. */
+asm("\t.data\nk8nops: "
+ K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+ K8_NOP7 K8_NOP8);
+
+extern unsigned char k8nops[];
+static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ k8nops,
+ k8nops + 1,
+ k8nops + 1 + 2,
+ k8nops + 1 + 2 + 3,
+ k8nops + 1 + 2 + 3 + 4,
+ k8nops + 1 + 2 + 3 + 4 + 5,
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6,
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+
+/* Replace instructions with better alternatives for this CPU type.
+
+ This runs before SMP is initialized to avoid SMP problems with
+ self-modifying code. This implies that asymmetric systems, where
+ APs have fewer capabilities than the boot processor, are not
+ handled. In that case boot with "noreplacement". */
+void apply_alternatives(void *start, void *end)
+{
+ struct alt_instr *a;
+ int diff, i, k;
+ for (a = start; (void *)a < end; a++) {
+ if (!boot_cpu_has(a->cpuid))
+ continue;
+
+ BUG_ON(a->replacementlen > a->instrlen);
+ __inline_memcpy(a->instr, a->replacement, a->replacementlen);
+ diff = a->instrlen - a->replacementlen;
+
+ /* Pad the rest with nops */
+ for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
+ k = diff;
+ if (k > ASM_NOP_MAX)
+ k = ASM_NOP_MAX;
+ __inline_memcpy(a->instr + i, k8_nops[k], k);
+ }
+ }
+}
+
+static int no_replacement __initdata = 0;
+
+void __init alternative_instructions(void)
+{
+ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+ if (no_replacement)
+ return;
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
+
+static int __init noreplacement_setup(char *s)
+{
+ no_replacement = 1;
+ return 0;
+}
+
+__setup("noreplacement", noreplacement_setup);
+
+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+struct edd edd;
+#ifdef CONFIG_EDD_MODULE
+EXPORT_SYMBOL(edd);
+#endif
+/**
+ * copy_edd() - Copy the BIOS EDD information
+ * from boot_params into a safe place.
+ *
+ */
+static inline void copy_edd(void)
+{
+ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
+ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
+ edd.edd_info_nr = EDD_NR;
+}
+#else
+static inline void copy_edd(void)
+{
+}
+#endif
+
+#if 0
+#define EBDA_ADDR_POINTER 0x40E
+static void __init reserve_ebda_region(void)
+{
+ unsigned int addr;
+ /**
+ * there is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E
+ */
+ addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
+ addr <<= 4;
+ if (addr)
+ reserve_bootmem_generic(addr, PAGE_SIZE);
+}
+#endif
+
+/*
+ * Guest physical address space starts at 0.
+ */
+
+unsigned long __init xen_end_of_ram(void)
+{
+ unsigned long max_end_pfn = xen_start_info.nr_pages;
+
+ if ( xen_override_max_pfn < max_end_pfn)
+ xen_override_max_pfn = max_end_pfn;
+
+ return xen_override_max_pfn;
+}
+
+static void __init print_memory_map(char *who)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ early_printk(" %s: %016Lx - %016Lx ", who,
+ e820.map[i].addr,
+ e820.map[i].addr + e820.map[i].size);
+ switch (e820.map[i].type) {
+ case E820_RAM: early_printk("(usable)\n");
+ break;
+ case E820_RESERVED:
+ early_printk("(reserved)\n");
+ break;
+ case E820_ACPI:
+ early_printk("(ACPI data)\n");
+ break;
+ case E820_NVS:
+ early_printk("(ACPI NVS)\n");
+ break;
+ default: early_printk("type %u\n", e820.map[i].type);
+ break;
+ }
+ }
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ unsigned long low_mem_size;
+ int i, j;
+ physdev_op_t op;
+
+#if 0
+ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+#else
+ ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
+#endif
+ drive_info = DRIVE_INFO;
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ screen_info = SCREEN_INFO;
+#endif
+ edid_info = EDID_INFO;
+ aux_device_present = AUX_DEVICE_INFO;
+ saved_video_mode = SAVED_VIDEO_MODE;
+ bootloader_type = LOADER_TYPE;
+
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+/* register_console(&xen_console); */
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ /* This is drawn from a dump from vgacon:startup in standard Linux. */
+ screen_info.orig_video_mode = 3;
+ screen_info.orig_video_isVGA = 1;
+ screen_info.orig_video_lines = 25;
+ screen_info.orig_video_cols = 80;
+ screen_info.orig_video_ega_bx = 3;
+ screen_info.orig_video_points = 16;
+#endif
+ ARCH_SETUP
+ print_memory_map(machine_specific_memory_setup());
+
+ /* copy_edd(); */
+
+ if (!MOUNT_ROOT_RDONLY)
+ root_mountflags &= ~MS_RDONLY;
+ init_mm.start_code = (unsigned long) &_text;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = (unsigned long) &_edata;
+/* init_mm.brk = (unsigned long) &_end; */
+ init_mm.brk = start_pfn << PAGE_SHIFT;
+
+
+#if 0 /* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
+ code_resource.start = virt_to_phys(&_text);
+ code_resource.end = virt_to_phys(&_etext)-1;
+ data_resource.start = virt_to_phys(&_etext);
+ data_resource.end = virt_to_phys(&_edata)-1;
+#endif
+ parse_cmdline_early(cmdline_p);
+
+ early_identify_cpu(&boot_cpu_data);
+
+ /*
+ * partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+#if 0
+ end_pfn = e820_end_of_ram();
+#else
+ end_pfn = xen_end_of_ram();
+#endif
+
+ check_efer();
+
+ init_memory_mapping(0, (end_pfn << PAGE_SHIFT));
+
+#ifdef CONFIG_ACPI_BOOT
+ /*
+ * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
+ * Call this early for SRAT node setup.
+ */
+ acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_ACPI_NUMA
+ /*
+ * Parse SRAT to discover nodes.
+ */
+ acpi_numa_init();
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+ numa_initmem_init(0, end_pfn);
+#else
+ contig_initmem_init();
+#endif
+
+ /* Reserve direct mapping and shared info etc. */
+// reserve_bootmem_generic(table_start << PAGE_SHIFT, (table_end + 1 - table_start) << PAGE_SHIFT);
+
+// reserve_bootmem_generic(0, (table_end + 1) << PAGE_SHIFT);
+
+ /* reserve kernel */
+// kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
+
+#if 0
+ /*
+ * reserve physical page 0 - it's a special BIOS page on many boxes,
+ * enabling clean reboots, SMP operation, laptop functions.
+ */
+ reserve_bootmem_generic(0, PAGE_SIZE);
+#endif
+
+ /* reserve ebda region */
+/* reserve_ebda_region(); */
+
+#ifdef CONFIG_SMP
+ /*
+ * But first pinch a few for the stack/trampoline stuff
+ * FIXME: Don't need the extra page at 4K, but need to fix
+ * trampoline before removing it. (see the GDT stuff)
+ */
+ reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
+
+ /* Reserve SMP trampoline */
+ reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
+#endif
+
+#ifdef CONFIG_ACPI_SLEEP
+ /*
+ * Reserve low memory region for sleep support.
+ */
+ acpi_reserve_bootmem();
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * Find and reserve possible boot-time SMP configuration:
+ */
+ find_smp_config();
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (xen_start_info.mod_start) {
+ if (LOADER_TYPE && INITRD_START) {
+ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
+ /* reserve_bootmem_generic(INITRD_START, INITRD_SIZE); */
+ initrd_start = INITRD_START + PAGE_OFFSET;
+ initrd_end = initrd_start+INITRD_SIZE;
+ initrd_below_start_ok = 1;
+ }
+ else {
+ printk(KERN_ERR "initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ (unsigned long)(INITRD_START + INITRD_SIZE),
+ (unsigned long)(end_pfn << PAGE_SHIFT));
+ initrd_start = 0;
+ }
+ }
+ }
+#endif
+ paging_init();
+
+ /* Make sure we have a large enough P->M table. */
+ if (end_pfn > xen_start_info.nr_pages) {
+ phys_to_machine_mapping = alloc_bootmem(
+ max_pfn * sizeof(unsigned long));
+ memset(phys_to_machine_mapping, ~0,
+ max_pfn * sizeof(unsigned long));
+ memcpy(phys_to_machine_mapping,
+ (unsigned long *)xen_start_info.mfn_list,
+ xen_start_info.nr_pages * sizeof(unsigned long));
+ free_bootmem(
+ __pa(xen_start_info.mfn_list),
+ PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
+ sizeof(unsigned long))));
+ }
+
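+	/* Each entry in pfn_to_mfn_frame_list is the machine frame number
+	   of one page of the phys_to_machine_mapping table, letting the
+	   hypervisor and tools locate the whole P->M table. */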
+ pfn_to_mfn_frame_list = alloc_bootmem(PAGE_SIZE);
+
+	for (i = 0, j = 0; i < end_pfn; i += (PAGE_SIZE/sizeof(unsigned long)), j++)
+	{
+ pfn_to_mfn_frame_list[j] =
+ virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+ }
+
+#if 0
+ check_ioapic();
+#endif
+
+#ifdef CONFIG_ACPI_BOOT
+ /*
+ * Read APIC and some other early information from ACPI tables.
+ */
+ acpi_boot_init();
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * get boot-time SMP configuration:
+ */
+ if (smp_found_config)
+ get_smp_config();
+ init_apic_mappings();
+#endif
+
+ /* XXX Disable irqdebug until we have a way to avoid interrupt
+ * conflicts. */
+/* noirqdebug_setup(""); */
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+ probe_roms();
+#endif
+/* e820_reserve_resources(); */
+
+ request_resource(&iomem_resource, &video_ram_resource);
+
+ {
+ unsigned i;
+ /* request I/O space for devices used on all i[345]86 PCs */
+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
+ request_resource(&ioport_resource, &standard_io_resources[i]);
+ }
+
+ /* Will likely break when you have unassigned resources with more
+ than 4GB memory and bridges that don't support more than 4GB.
+	   Doing it properly would require using pci_alloc_consistent
+ in this case. */
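+	/* Round the end of RAM up to a 1MB boundary and start PCI
+	   allocations above it. */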
+ low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
+ if (low_mem_size > pci_mem_start)
+ pci_mem_start = low_mem_size;
+
+#ifdef CONFIG_GART_IOMMU
+ iommu_hole_init();
+#endif
+
+ op.cmd = PHYSDEVOP_SET_IOPL;
+ op.u.set_iopl.iopl = current->thread.io_pl = 1;
+ HYPERVISOR_physdev_op(&op);
+
+ if (xen_start_info.flags & SIF_INITDOMAIN) {
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ panic("Xen granted us console access "
+ "but not privileged status");
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+ } else {
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ extern const struct consw xennull_con;
+ extern int console_use_vt;
+#if defined(CONFIG_VGA_CONSOLE)
+ /* disable VGA driver */
+ ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
+#endif
+ conswitchp = &xennull_con;
+ console_use_vt = 0;
+#endif
+ }
+}
+
+static int __init get_model_name(struct cpuinfo_x86 *c)
+{
+ unsigned int *v;
+
+ if (c->x86_cpuid_level < 0x80000004)
+ return 0;
+
+ v = (unsigned int *) c->x86_model_id;
+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+ c->x86_model_id[48] = 0;
+ return 1;
+}
+
+
+static void __init display_cacheinfo(struct cpuinfo_x86 *c)
+{
+ unsigned int n, dummy, eax, ebx, ecx, edx;
+
+ n = c->x86_cpuid_level;
+
+ if (n >= 0x80000005) {
+ cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+ c->x86_cache_size=(ecx>>24)+(edx>>24);
+ /* On K8 L1 TLB is inclusive, so don't count it */
+ c->x86_tlbsize = 0;
+ }
+
+ if (n >= 0x80000006) {
+ cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
+ ecx = cpuid_ecx(0x80000006);
+ c->x86_cache_size = ecx >> 16;
+ c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+
+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+ c->x86_cache_size, ecx & 0xFF);
+ }
+
+ if (n >= 0x80000007)
+ cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
+ if (n >= 0x80000008) {
+ cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
+ c->x86_virt_bits = (eax >> 8) & 0xff;
+ c->x86_phys_bits = eax & 0xff;
+ }
+}
+
+
+static int __init init_amd(struct cpuinfo_x86 *c)
+{
+ int r;
+ int level;
+#ifdef CONFIG_NUMA
+ int cpu;
+#endif
+
+	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	   3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
+ clear_bit(0*32+31, &c->x86_capability);
+
+ /* C-stepping K8? */
+ level = cpuid_eax(1);
+ if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
+ set_bit(X86_FEATURE_K8_C, &c->x86_capability);
+
+ r = get_model_name(c);
+ if (!r) {
+ switch (c->x86) {
+ case 15:
+			/* Should distinguish models here, but this is only
+			   a fallback anyway. */
+ strcpy(c->x86_model_id, "Hammer");
+ break;
+ }
+ }
+ display_cacheinfo(c);
+
+ if (c->x86_cpuid_level >= 0x80000008) {
+ c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+ if (c->x86_num_cores & (c->x86_num_cores - 1))
+ c->x86_num_cores = 1;
+
+#ifdef CONFIG_NUMA
+		/* On a dual core setup the lower bits of the apic id
+		   distinguish the cores. Fix up the CPU<->node mappings
+		   here based on that.
+		   Assumes the number of cores is a power of two.
+		   When using SRAT, use the mapping from SRAT. */
+ cpu = c->x86_apicid;
+ if (acpi_numa <= 0 && c->x86_num_cores > 1) {
+ cpu_to_node[cpu] = cpu >> hweight32(c->x86_num_cores - 1);
+ if (!node_online(cpu_to_node[cpu]))
+ cpu_to_node[cpu] = first_node(node_online_map);
+ }
+ printk(KERN_INFO "CPU %d(%d) -> Node %d\n",
+ cpu, c->x86_num_cores, cpu_to_node[cpu]);
+#endif
+ }
+
+ return r;
+}
+
+static void __init detect_ht(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ u32 eax, ebx, ecx, edx;
+ int index_lsb, index_msb, tmp;
+ int cpu = smp_processor_id();
+
+ if (!cpu_has(c, X86_FEATURE_HT))
+ return;
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+ if (smp_num_siblings == 1) {
+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+ } else if (smp_num_siblings > 1) {
+ index_lsb = 0;
+ index_msb = 31;
+ /*
+ * At this point we only support two siblings per
+ * processor package.
+ */
+ if (smp_num_siblings > NR_CPUS) {
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
+ smp_num_siblings = 1;
+ return;
+ }
+ tmp = smp_num_siblings;
+		while ((tmp & 1) == 0) {
+			tmp >>= 1;
+			index_lsb++;
+		}
+		tmp = smp_num_siblings;
+		while ((tmp & 0x80000000) == 0) {
+			tmp <<= 1;
+			index_msb--;
+		}
+		if (index_lsb != index_msb)
+			index_msb++;
+ phys_proc_id[cpu] = phys_pkg_id(index_msb);
+
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+ phys_proc_id[cpu]);
+ }
+#endif
+}
+
+static void __init sched_cmp_hack(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ /* AMD dual core looks like HT but isn't really. Hide it from the
+ scheduler. This works around problems with the domain scheduler.
+ Also probably gives slightly better scheduling and disables
+ SMT nice which is harmful on dual core.
+ TBD tune the domain scheduler for dual core. */
+ if (c->x86_vendor == X86_VENDOR_AMD && cpu_has(c, X86_FEATURE_CMP_LEGACY))
+ smp_num_siblings = 1;
+#endif
+}
+
+static void __init init_intel(struct cpuinfo_x86 *c)
+{
+ /* Cache sizes */
+ unsigned n;
+
+ init_intel_cacheinfo(c);
+ n = c->x86_cpuid_level;
+ if (n >= 0x80000008) {
+ unsigned eax = cpuid_eax(0x80000008);
+ c->x86_virt_bits = (eax >> 8) & 0xff;
+ c->x86_phys_bits = eax & 0xff;
+ }
+
+ if (c->x86 == 15)
+ c->x86_cache_alignment = c->x86_clflush_size * 2;
+}
+
+void __init get_cpu_vendor(struct cpuinfo_x86 *c)
+{
+ char *v = c->x86_vendor_id;
+
+ if (!strcmp(v, "AuthenticAMD"))
+ c->x86_vendor = X86_VENDOR_AMD;
+ else if (!strcmp(v, "GenuineIntel"))
+ c->x86_vendor = X86_VENDOR_INTEL;
+ else
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+}
+
+struct cpu_model_info {
+ int vendor;
+ int family;
+ char *model_names[16];
+};
+
+/* Do some early cpuid on the boot CPU to get some parameters that are
+   needed before check_bugs. Everything advanced is in identify_cpu
+   below. */
+void __init early_identify_cpu(struct cpuinfo_x86 *c)
+{
+ u32 tfms;
+
+ c->loops_per_jiffy = loops_per_jiffy;
+ c->x86_cache_size = -1;
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
+ c->x86_vendor_id[0] = '\0'; /* Unset */
+ c->x86_model_id[0] = '\0'; /* Unset */
+ c->x86_clflush_size = 64;
+ c->x86_cache_alignment = c->x86_clflush_size;
+ c->x86_num_cores = 1;
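+	/* Provisional apicid from the CPU index; overwritten below from
+	   CPUID leaf 1 when it is available. */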
+ c->x86_apicid = c == &boot_cpu_data ? 0 : c - cpu_data;
+ c->x86_cpuid_level = 0;
+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+ /* Get vendor name */
+ cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
+ (unsigned int *)&c->x86_vendor_id[0],
+ (unsigned int *)&c->x86_vendor_id[8],
+ (unsigned int *)&c->x86_vendor_id[4]);
+
+ get_cpu_vendor(c);
+
+ /* Initialize the standard set of capabilities */
+ /* Note that the vendor-specific code below might override */
+
+ /* Intel-defined flags: level 0x00000001 */
+ if (c->cpuid_level >= 0x00000001) {
+ __u32 misc;
+ cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
+ &c->x86_capability[0]);
+ c->x86 = (tfms >> 8) & 0xf;
+ c->x86_model = (tfms >> 4) & 0xf;
+ c->x86_mask = tfms & 0xf;
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
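+		/* CPUID.1 EDX bit 19 is CLFSH; EBX bits 15:8 give the
+		   cache line size in 8-byte units. */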
+ if (c->x86_capability[0] & (1<<19))
+ c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+ c->x86_apicid = misc >> 24;
+ } else {
+ /* Have CPUID level 0 only - unheard of */
+ c->x86 = 4;
+ }
+}
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __init identify_cpu(struct cpuinfo_x86 *c)
+{
+ int i;
+ u32 xlvl;
+
+ early_identify_cpu(c);
+
+ /* AMD-defined flags: level 0x80000001 */
+ xlvl = cpuid_eax(0x80000000);
+ c->x86_cpuid_level = xlvl;
+ if ((xlvl & 0xffff0000) == 0x80000000) {
+ if (xlvl >= 0x80000001) {
+ c->x86_capability[1] = cpuid_edx(0x80000001);
+ c->x86_capability[5] = cpuid_ecx(0x80000001);
+ }
+ if (xlvl >= 0x80000004)
+ get_model_name(c); /* Default name */
+ }
+
+ /* Transmeta-defined flags: level 0x80860001 */
+ xlvl = cpuid_eax(0x80860000);
+ if ((xlvl & 0xffff0000) == 0x80860000) {
+		/* Don't set x86_cpuid_level here for now, to avoid confusion. */
+ if (xlvl >= 0x80860001)
+ c->x86_capability[2] = cpuid_edx(0x80860001);
+ }
+
+ /*
+ * Vendor-specific initialization. In this section we
+ * canonicalize the feature flags, meaning if there are
+ * features a certain CPU supports which CPUID doesn't
+ * tell us, CPUID claiming incorrect flags, or other bugs,
+ * we handle them here.
+ *
+ * At the end of this section, c->x86_capability better
+ * indicate the features this CPU genuinely supports!
+ */
+ switch (c->x86_vendor) {
+ case X86_VENDOR_AMD:
+ init_amd(c);
+ break;
+
+ case X86_VENDOR_INTEL:
+ init_intel(c);
+ break;
+
+ case X86_VENDOR_UNKNOWN:
+ default:
+ display_cacheinfo(c);
+ break;
+ }
+
+ select_idle_routine(c);
+ detect_ht(c);
+ sched_cmp_hack(c);
+
+ /*
+ * On SMP, boot_cpu_data holds the common feature set between
+ * all CPUs; so make sure that we indicate which features are
+ * common between the CPUs. The first time this routine gets
+ * executed, c == &boot_cpu_data.
+ */
+ if (c != &boot_cpu_data) {
+ /* AND the already accumulated flags with these */
+ for (i = 0 ; i < NCAPINTS ; i++)
+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+ }
+
+#ifdef CONFIG_X86_MCE
+ mcheck_init(c);
+#endif
+#ifdef CONFIG_NUMA
+ if (c != &boot_cpu_data)
+ numa_add_cpu(c - cpu_data);
+#endif
+}
+
+
+void __init print_cpu_info(struct cpuinfo_x86 *c)
+{
+ if (c->x86_model_id[0])
+ printk("%s", c->x86_model_id);
+
+ if (c->x86_mask || c->cpuid_level >= 0)
+ printk(" stepping %02x\n", c->x86_mask);
+ else
+ printk("\n");
+}
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ struct cpuinfo_x86 *c = v;
+
+ /*
+ * These flag bits must match the definitions in <asm/cpufeature.h>.
+ * NULL means this bit is undefined or reserved; either way it doesn't
+ * have meaning as far as Linux is concerned. Note that it's important
+ * to realize there is a difference between this table and CPUID -- if
+ * applications want to get the raw CPUID data, they should access
+ * /dev/cpu/<cpu_nr>/cpuid instead.
+ */
+ static char *x86_cap_flags[] = {
+ /* Intel-defined */
+ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
+ "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
+ "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
+ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
+
+ /* AMD-defined */
+ "pni", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
+ NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
+
+ /* Transmeta-defined */
+ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Other (Linux-defined) */
+ "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Intel-defined (#2) */
+ "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
+ "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* AMD-defined (#2) */
+ "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+ };
+ static char *x86_power_flags[] = {
+ "ts", /* temperature sensor */
+ "fid", /* frequency id control */
+ "vid", /* voltage id control */
+ "ttp", /* thermal trip */
+ };
+
+
+#ifdef CONFIG_SMP
+ if (!cpu_online(c-cpu_data))
+ return 0;
+#endif
+
+ seq_printf(m,"processor\t: %u\n"
+ "vendor_id\t: %s\n"
+ "cpu family\t: %d\n"
+ "model\t\t: %d\n"
+ "model name\t: %s\n",
+ (unsigned)(c-cpu_data),
+ c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
+ c->x86,
+ (int)c->x86_model,
+ c->x86_model_id[0] ? c->x86_model_id : "unknown");
+
+ if (c->x86_mask || c->cpuid_level >= 0)
+ seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+ else
+ seq_printf(m, "stepping\t: unknown\n");
+
+ if (cpu_has(c,X86_FEATURE_TSC)) {
+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+ cpu_khz / 1000, (cpu_khz % 1000));
+ }
+
+ /* Cache size */
+ if (c->x86_cache_size >= 0)
+ seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+
+#ifdef CONFIG_SMP
+ seq_printf(m, "physical id\t: %d\n", phys_proc_id[c - cpu_data]);
+ seq_printf(m, "siblings\t: %d\n", c->x86_num_cores * smp_num_siblings);
+#endif
+
+ seq_printf(m,
+ "fpu\t\t: yes\n"
+ "fpu_exception\t: yes\n"
+ "cpuid level\t: %d\n"
+ "wp\t\t: yes\n"
+ "flags\t\t:",
+ c->cpuid_level);
+
+ {
+ int i;
+		for (i = 0; i < 32*NCAPINTS; i++)
+			if (test_bit(i, &c->x86_capability) &&
+			    x86_cap_flags[i] != NULL)
+				seq_printf(m, " %s", x86_cap_flags[i]);
+ }
+
+ seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
+ c->loops_per_jiffy/(500000/HZ),
+ (c->loops_per_jiffy/(5000/HZ)) % 100);
+
+ if (c->x86_tlbsize > 0)
+ seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
+ seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
+ seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
+
+ seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
+ c->x86_phys_bits, c->x86_virt_bits);
+
+ seq_printf(m, "power management:");
+ {
+ unsigned i;
+ for (i = 0; i < 32; i++)
+ if (c->x86_power & (1 << i)) {
+ if (i < ARRAY_SIZE(x86_power_flags))
+ seq_printf(m, " %s", x86_power_flags[i]);
+ else
+ seq_printf(m, " [%d]", i);
+ }
+ }
+ seq_printf(m, "\n");
+
+ if (c->x86_num_cores > 1)
+ seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
+
+ seq_printf(m, "\n\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+struct seq_operations cpuinfo_op = {
+	.start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup64.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup64.c
new file mode 100644
index 0000000000..32b813661f
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup64.c
@@ -0,0 +1,346 @@
+/*
+ * X86-64 specific CPU setup.
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
+ * See setup.c for older changelog.
+ * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen
+ *
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/bitops.h>
+#include <asm/pda.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/i387.h>
+#include <asm/percpu.h>
+#include <asm/mtrr.h>
+#include <asm/proto.h>
+#include <asm/mman.h>
+#include <asm/numa.h>
+
+#include <asm-xen/hypervisor.h>
+
+char x86_boot_params[2048] __initdata = {0,};
+
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;
+
+extern struct task_struct init_task;
+
+extern unsigned char __per_cpu_start[], __per_cpu_end[];
+
+extern struct desc_ptr cpu_gdt_descr[];
+struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
+
+char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
+
+unsigned long __supported_pte_mask = ~0UL;
+static int do_not_nx __initdata = 0;
+
+/* noexec=on|off
+   Control non-executable mappings for 64-bit processes.
+
+   on	Enable (default)
+   off	Disable
+*/
+int __init nonx_setup(char *str)
+{
+ if (!strncmp(str, "on", 2)) {
+ __supported_pte_mask |= _PAGE_NX;
+ do_not_nx = 0;
+ } else if (!strncmp(str, "off", 3)) {
+ do_not_nx = 1;
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+ return 0;
+}
+__setup("noexec=", nonx_setup); /* parsed early actually */
+
+int force_personality32 = READ_IMPLIES_EXEC;
+
+/* noexec32=on|off
+   Control non-executable heap for 32-bit processes.
+   To control the stack too, use noexec=off.
+
+   on	PROT_READ does not imply PROT_EXEC for 32-bit processes
+   off	PROT_READ implies PROT_EXEC (default)
+*/
+static int __init nonx32_setup(char *str)
+{
+ if (!strcmp(str, "on"))
+ force_personality32 &= ~READ_IMPLIES_EXEC;
+ else if (!strcmp(str, "off"))
+ force_personality32 |= READ_IMPLIES_EXEC;
+ return 0;
+}
+__setup("noexec32=", nonx32_setup);
+
+/*
+ * Great future plan:
+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+ * Always point %gs to its beginning
+ */
+void __init setup_per_cpu_areas(void)
+{
+ int i;
+ unsigned long size;
+
+ /* Copy section for each CPU (we discard the original) */
+ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
+#ifdef CONFIG_MODULES
+ if (size < PERCPU_ENOUGH_ROOM)
+ size = PERCPU_ENOUGH_ROOM;
+#endif
+
+ for (i = 0; i < NR_CPUS; i++) {
+ unsigned char *ptr;
+
+ if (!NODE_DATA(cpu_to_node(i))) {
+ printk("cpu with no node %d, num_online_nodes %d\n",
+ i, num_online_nodes());
+ ptr = alloc_bootmem(size);
+ } else {
+ ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
+ }
+ if (!ptr)
+ panic("Cannot allocate cpu data for CPU %d\n", i);
+ cpu_pda[i].data_offset = ptr - __per_cpu_start;
+ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ }
+}
+
+void pda_init(int cpu)
+{
+ pgd_t *old_level4 = (pgd_t *)xen_start_info.pt_base;
+ struct x8664_pda *pda = &cpu_pda[cpu];
+
+	/* Set up data that may be needed in __get_free_pages early */
+ asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
+ HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL,
+ (unsigned long)(cpu_pda + cpu));
+
+ pda->me = pda;
+ pda->cpunumber = cpu;
+ pda->irqcount = -1;
+ pda->kernelstack =
+ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
+ pda->active_mm = &init_mm;
+ pda->mmu_state = 0;
+ pda->kernel_mode = 1;
+
+ if (cpu == 0) {
+ memcpy((void *)init_level4_pgt,
+ (void *) xen_start_info.pt_base, PAGE_SIZE);
+ /* others are initialized in smpboot.c */
+ pda->pcurrent = &init_task;
+ pda->irqstackptr = boot_cpu_stack;
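+		/* Xen requires page-table pages to be mapped read-only
+		   and pinned before they may be installed. */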
+ make_page_readonly(init_level4_pgt);
+ make_page_readonly(init_level4_user_pgt);
+ make_page_readonly(level3_user_pgt); /* for vsyscall stuff */
+ xen_pgd_pin(__pa_symbol(init_level4_user_pgt));
+ xen_pud_pin(__pa_symbol(level3_user_pgt));
+ set_pgd((pgd_t *)(init_level4_user_pgt + 511),
+ mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
+ } else {
+ pda->irqstackptr = (char *)
+ __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+ if (!pda->irqstackptr)
+ panic("cannot allocate irqstack for cpu %d", cpu);
+ }
+
+ xen_pt_switch(__pa(init_level4_pgt));
+ xen_new_user_pt(__pa(init_level4_user_pgt));
+
+ if (cpu == 0) {
+ xen_pgd_unpin(__pa(old_level4));
+#if 0
+ early_printk("__pa: %x, <machine_phys> old_level 4 %x\n",
+ __pa(xen_start_info.pt_base),
+ pfn_to_mfn(__pa(old_level4) >> PAGE_SHIFT));
+#endif
+// make_page_writable(old_level4);
+// free_bootmem(__pa(old_level4), PAGE_SIZE);
+ }
+
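+	/* The IRQ stack grows down: point at its top, leaving 64 bytes
+	   of slack. */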
+ pda->irqstackptr += IRQSTACKSIZE-64;
+}
+
+char boot_exception_stacks[N_EXCEPTION_STACKS * EXCEPTION_STKSZ]
+__attribute__((section(".bss.page_aligned")));
+
+/* May not be marked __init: used by software suspend */
+void syscall_init(void)
+{
+#ifdef CONFIG_IA32_EMULATION
+ syscall32_cpu_init ();
+#endif
+}
+
+void __init check_efer(void)
+{
+ unsigned long efer;
+
+ /* rdmsrl(MSR_EFER, efer); */
+
+	/*
+	 * At this point Xen does not accept page-table entries with
+	 * bit 63 (NX) set, so NX is not supported: pretend EFER.NX
+	 * is clear. Come back to this later.
+	 */
+ efer = 0;
+
+ if (!(efer & EFER_NX) || do_not_nx) {
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+}
+
+void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
+{
+ unsigned long frames[gdt_descr->size >> PAGE_SHIFT];
+ unsigned long va;
+ int f;
+
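+	/* Xen wants the GDT registered as a list of machine frame numbers,
+	   with every frame mapped read-only. */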
+ for (va = gdt_descr->address, f = 0;
+ va < gdt_descr->address + gdt_descr->size;
+ va += PAGE_SIZE, f++) {
+ frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+ make_page_readonly((void *)va);
+ }
+ if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
+ sizeof (struct desc_struct)))
+ BUG();
+}
+
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless; this function acts as a
+ * 'CPU state barrier': nothing should get across.
+ * A lot of state is already set up in PDA init.
+ */
+void __init cpu_init (void)
+{
+#ifdef CONFIG_SMP
+ int cpu = stack_smp_processor_id();
+#else
+ int cpu = smp_processor_id();
+#endif
+ struct tss_struct *t = &per_cpu(init_tss, cpu);
+ unsigned long v;
+ char *estacks = NULL;
+ struct task_struct *me;
+ int i;
+
+ /* CPU 0 is initialised in head64.c */
+ if (cpu != 0) {
+ pda_init(cpu);
+ } else
+ estacks = boot_exception_stacks;
+
+ me = current;
+
+ if (test_and_set_bit(cpu, &cpu_initialized))
+ panic("CPU#%d already initialized!\n", cpu);
+
+ printk("Initializing CPU#%d\n", cpu);
+
+#if 0
+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+#endif
+ /*
+ * Initialize the per-CPU GDT with the boot GDT,
+ * and set up the GDT descriptor:
+ */
+ if (cpu) {
+ memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
+ }
+
+ cpu_gdt_descr[cpu].size = GDT_SIZE;
+ cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
+#if 0
+ asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
+ asm volatile("lidt %0" :: "m" (idt_descr));
+#endif
+ cpu_gdt_init(&cpu_gdt_descr[cpu]);
+
+#if 0
+ memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
+
+#endif
+ memcpy(me->thread.tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
+ GDT_ENTRY_TLS_ENTRIES * 8);
+
+	/*
+	 * Clear the NT (nested task) flag, bit 14 of EFLAGS:
+	 */
+
+ asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");
+
+ if (cpu == 0)
+ early_identify_cpu(&boot_cpu_data);
+
+ syscall_init();
+
+ barrier();
+ check_efer();
+
+ /*
+ * set up and load the per-CPU TSS
+ */
+ for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+ if (cpu) {
+ estacks = (char *)__get_free_pages(GFP_ATOMIC,
+ EXCEPTION_STACK_ORDER);
+ if (!estacks)
+ panic("Cannot allocate exception stack %ld %d\n",
+ v, cpu);
+ }
+ estacks += EXCEPTION_STKSZ;
+ t->ist[v] = (unsigned long)estacks;
+ }
+
+ t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+ /*
+ * <= is required because the CPU will access up to
+ * 8 bits beyond the end of the IO permission bitmap.
+ */
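+	/* All bits set: any port access from user space will fault. */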
+ for (i = 0; i <= IO_BITMAP_LONGS; i++)
+ t->io_bitmap[i] = ~0UL;
+
+ atomic_inc(&init_mm.mm_count);
+ me->active_mm = &init_mm;
+ if (me->mm)
+ BUG();
+ enter_lazy_tlb(&init_mm, me);
+
+ load_LDT(&init_mm.context);
+
+ /*
+ * Clear all 6 debug registers:
+ */
+#define CD(register) HYPERVISOR_set_debugreg(register, 0)
+
+ CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
+
+#undef CD
+#if 0
+ fpu_init();
+#endif
+
+#ifdef CONFIG_NUMA
+ numa_add_cpu(cpu);
+#endif
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/signal.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/signal.c
new file mode 100644
index 0000000000..2282257d98
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/signal.c
@@ -0,0 +1,493 @@
+/*
+ * linux/arch/x86_64/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
+ *
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
+ * 2000-2002 x86-64 support by Andi Kleen
+ *
+ * $Id: signal.c,v 1.18 2001/10/17 22:30:37 ak Exp $
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/compiler.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
+#include <asm/proto.h>
+
+/* #define DEBUG_SIG 1 */
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs * regs);
+void ia32_setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs * regs);
+
+asmlinkage long
+sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+#ifdef DEBUG_SIG
+	printk("rt_sigsuspend saveset(%lx) newset(%lx) regs(%p) rip(%lx)\n",
+ saveset, newset, regs, regs->rip);
+#endif
+ regs->rax = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage long
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+ struct pt_regs *regs)
+{
+ return do_sigaltstack(uss, uoss, regs->rsp);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+struct rt_sigframe
+{
+ char *pretcode;
+ struct ucontext uc;
+ struct siginfo info;
+};
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned long *prax)
+{
+ unsigned int err = 0;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->x)
+
+ COPY(rdi); COPY(rsi); COPY(rbp); COPY(rsp); COPY(rbx);
+ COPY(rdx); COPY(rcx); COPY(rip);
+ COPY(r8);
+ COPY(r9);
+ COPY(r10);
+ COPY(r11);
+ COPY(r12);
+ COPY(r13);
+ COPY(r14);
+ COPY(r15);
+
+ {
+ unsigned int tmpflags;
+ err |= __get_user(tmpflags, &sc->eflags);
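+		/* 0x40DD5 lets only CF, PF, AF, ZF, SF, TF, DF, OF and AC
+		   be restored from the signal context. */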
+ regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
+ regs->orig_rax = -1; /* disable syscall checks */
+ }
+
+ {
+ struct _fpstate __user * buf;
+ err |= __get_user(buf, &sc->fpstate);
+
+ if (buf) {
+ if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+ goto badframe;
+ err |= restore_i387(buf);
+ } else {
+ struct task_struct *me = current;
+ if (used_math()) {
+ clear_fpu(me);
+ clear_used_math();
+ }
+ }
+ }
+
+ err |= __get_user(*prax, &sc->rax);
+ return err;
+
+badframe:
+ return 1;
+}
+
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ sigset_t set;
+ unsigned long eax;
+
+ frame = (struct rt_sigframe __user *)(regs->rsp - 8);
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame))) {
+ goto badframe;
+ }
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) {
+ goto badframe;
+ }
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax)) {
+ goto badframe;
+ }
+
+#ifdef DEBUG_SIG
+	printk("%d sigreturn rip:%lx rsp:%lx frame:%p rax:%lx\n",
+	       current->pid, regs->rip, regs->rsp, frame, eax);
+#endif
+
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->rsp) == -EFAULT)
+ goto badframe;
+
+ return eax;
+
+badframe:
+ signal_fault(regs,frame,"sigreturn");
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static inline int
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me)
+{
+ int err = 0;
+ unsigned long eflags;
+
+ err |= __put_user(0, &sc->gs);
+ err |= __put_user(0, &sc->fs);
+
+ err |= __put_user(regs->rdi, &sc->rdi);
+ err |= __put_user(regs->rsi, &sc->rsi);
+ err |= __put_user(regs->rbp, &sc->rbp);
+ err |= __put_user(regs->rsp, &sc->rsp);
+ err |= __put_user(regs->rbx, &sc->rbx);
+ err |= __put_user(regs->rdx, &sc->rdx);
+ err |= __put_user(regs->rcx, &sc->rcx);
+ err |= __put_user(regs->rax, &sc->rax);
+ err |= __put_user(regs->r8, &sc->r8);
+ err |= __put_user(regs->r9, &sc->r9);
+ err |= __put_user(regs->r10, &sc->r10);
+ err |= __put_user(regs->r11, &sc->r11);
+ err |= __put_user(regs->r12, &sc->r12);
+ err |= __put_user(regs->r13, &sc->r13);
+ err |= __put_user(regs->r14, &sc->r14);
+ err |= __put_user(regs->r15, &sc->r15);
+ err |= __put_user(me->thread.trap_no, &sc->trapno);
+ err |= __put_user(me->thread.error_code, &sc->err);
+ err |= __put_user(regs->rip, &sc->rip);
+ eflags = regs->eflags;
+ if (current->ptrace & PT_PTRACED) {
+ eflags &= ~TF_MASK;
+ }
+ err |= __put_user(eflags, &sc->eflags);
+ err |= __put_user(mask, &sc->oldmask);
+ err |= __put_user(me->thread.cr2, &sc->cr2);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use.
+ */
+
+static void __user *
+get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
+{
+ unsigned long rsp;
+
+	/* Default to the normal stack, skipping the 128-byte red zone
+	   that the ABI reserves below %rsp. */
+	rsp = regs->rsp - 128;
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ /* RED-PEN: redzone on that stack? */
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (sas_ss_flags(rsp) == 0)
+ rsp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+ return (void __user *)round_down(rsp - size, 16);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs * regs)
+{
+ struct rt_sigframe __user *frame;
+ struct _fpstate __user *fp = NULL;
+ int err = 0;
+ struct task_struct *me = current;
+
+ if (used_math()) {
+ fp = get_stack(ka, regs, sizeof(struct _fpstate));
+ frame = (void __user *)round_down((unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
+
+ if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) {
+ goto give_sigsegv;
+ }
+
+ if (save_i387(fp) < 0)
+ err |= -1;
+ } else {
+ frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
+ }
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
+ goto give_sigsegv;
+ }
+
+ if (ka->sa.sa_flags & SA_SIGINFO) {
+ err |= copy_siginfo_to_user(&frame->info, info);
+ if (err) {
+ goto give_sigsegv;
+ }
+ }
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->rsp),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
+ err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
+ if (sizeof(*set) == 16) {
+ __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
+ __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
+ } else {
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ }
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ /* x86-64 should always use SA_RESTORER. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
+ } else {
+ /* could use a vstub here */
+ goto give_sigsegv;
+ }
+
+ if (err) {
+ goto give_sigsegv;
+ }
+
+#ifdef DEBUG_SIG
+ printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax);
+#endif
+
+ /* Set up registers for signal handler */
+ {
+ struct exec_domain *ed = current_thread_info()->exec_domain;
+ if (unlikely(ed && ed->signal_invmap && sig < 32))
+ sig = ed->signal_invmap[sig];
+ }
+ regs->rdi = sig;
+ /* In case the signal handler was declared without prototypes */
+ regs->rax = 0;
+
+	/* This also works for non-SA_SIGINFO handlers because they expect
+	   the next argument after the signal number on the stack. */
+ regs->rsi = (unsigned long)&frame->info;
+ regs->rdx = (unsigned long)&frame->uc;
+ regs->rip = (unsigned long) ka->sa.sa_handler;
+
+ regs->rsp = (unsigned long)frame;
+
+ set_fs(USER_DS);
+ if (regs->eflags & TF_MASK) {
+ if ((current->ptrace & (PT_PTRACED | PT_DTRACE)) == (PT_PTRACED | PT_DTRACE)) {
+ ptrace_notify(SIGTRAP);
+ } else {
+ regs->eflags &= ~TF_MASK;
+ }
+ }
+
+#ifdef DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+ current->comm, current->pid, frame, regs->rip, frame->pretcode);
+#endif
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+static void
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs *regs)
+{
+#ifdef DEBUG_SIG
+ printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n", current->pid, sig,
+ regs->rip, regs->rsp, regs);
+#endif
+
+ /* Are we from a system call? */
+ if ((long)regs->orig_rax >= 0) {
+ /* If so, check system call restarting.. */
+ switch (regs->rax) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->rax = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->rax = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->rax = regs->orig_rax;
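+			/* The syscall instruction is two bytes long, so
+			   this re-executes it on return to user space. */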
+ regs->rip -= 2;
+ }
+ }
+
+#ifdef CONFIG_IA32_EMULATION
+ if (test_thread_flag(TIF_IA32)) {
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ ia32_setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ ia32_setup_frame(sig, ka, oldset, regs);
+ } else
+#endif
+ setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL, even by
+ * mistake.
+ */
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if ((regs->cs & 2) != 2) {
+ return 1;
+ }
+
+ if (try_to_freeze(0))
+ goto no_signal;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Reenable any watchpoints before delivering the
+ * signal to user space. The processor register will
+ * have been cleared if the watchpoint triggered
+ * inside the kernel.
+ */
+ if (current->thread.debugreg7) {
+ HYPERVISOR_set_debugreg(7,
+ current->thread.debugreg7);
+ }
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &info, &ka, oldset, regs);
+ return 1;
+ }
+
+ no_signal:
+ /* Did we come from a system call? */
+ if ((long)regs->orig_rax >= 0) {
+ /* Restart the system call - no handlers present */
+ long res = regs->rax;
+ if (res == -ERESTARTNOHAND ||
+ res == -ERESTARTSYS ||
+ res == -ERESTARTNOINTR) {
+ regs->rax = regs->orig_rax;
+ regs->rip -= 2;
+ }
+ if (regs->rax == (unsigned long)-ERESTART_RESTARTBLOCK) {
+ regs->rax = __NR_restart_syscall;
+ regs->rip -= 2;
+ }
+ }
+ return 0;
+}
+
+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, __u32 thread_info_flags)
+{
+#ifdef DEBUG_SIG
+ printk("do_notify_resume flags:%x rip:%lx rsp:%lx caller:%lx pending:%lx\n",
+ thread_info_flags, regs->rip, regs->rsp, __builtin_return_address(0),signal_pending(current));
+#endif
+
+ /* Pending single-step? */
+ if (thread_info_flags & _TIF_SINGLESTEP) {
+ regs->eflags |= TF_MASK;
+ clear_thread_flag(TIF_SINGLESTEP);
+ }
+
+ /* deal with pending signal delivery */
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(regs,oldset);
+}
+
+void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
+{
+ struct task_struct *me = current;
+ if (exception_trace)
+ printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n",
+ me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax);
+
+ force_sig(SIGSEGV, me);
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/smp.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/smp.c
new file mode 100644
index 0000000000..b183fafacd
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/smp.c
@@ -0,0 +1,411 @@
+/*
+ * Intel SMP support routines.
+ *
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ * (c) 2002,2003 Andi Kleen, SuSE Labs.
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ */
+
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/interrupt.h>
+
+#include <asm/mtrr.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/mach_apic.h>
+#include <asm/proto.h>
+
+/*
+ * Smarter SMP flushing macros.
+ * c/o Linus Torvalds.
+ *
+ * These mean you can really definitely utterly forget about
+ * writing to user space from interrupts. (It's not allowed anyway.)
+ *
+ * Optimizations Manfred Spraul <manfred@colorfullife.com>
+ */
+
+static cpumask_t flush_cpumask;
+static struct mm_struct * flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+#define FLUSH_ALL 0xffffffff
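+/* Pseudo virtual address meaning: flush the whole TLB, not a single page. */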
+
+/*
+ * We cannot call mmdrop() because we are in interrupt context,
+ * instead update mm->cpu_vm_mask.
+ */
+static inline void leave_mm (unsigned long cpu)
+{
+ if (read_pda(mmu_state) == TLBSTATE_OK)
+ BUG();
+ clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
+ __flush_tlb();
+}
+
+/*
+ *
+ * The flush IPI assumes that a thread switch happens in this order:
+ * [cpu0: the cpu that switches]
+ * 1) switch_mm() either 1a) or 1b)
+ * 1a) thread switch to a different mm
+ * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
+ * Stop ipi delivery for the old mm. This is not synchronized with
+ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
+ * for the wrong mm, and in the worst case we perform a superfluous
+ * tlb flush.
+ * 1a2) set cpu mmu_state to TLBSTATE_OK
+ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
+ * was in lazy tlb mode.
+ * 1a3) update cpu active_mm
+ * Now cpu0 accepts tlb flushes for the new mm.
+ * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
+ * Now the other cpus will send tlb flush ipis.
+ * 1a5) change cr3.
+ * 1b) thread switch without mm change
+ * cpu active_mm is correct, cpu0 already handles
+ * flush ipis.
+ * 1b1) set cpu mmu_state to TLBSTATE_OK
+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
+ * Atomically set the bit [other cpus will start sending flush ipis],
+ * and test the bit.
+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
+ * 2) switch %%esp, ie current
+ *
+ * The interrupt must handle 2 special cases:
+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
+ * runs in kernel space, the cpu could load tlb entries for user space
+ * pages.
+ *
+ * The good news is that cpu mmu_state is local to each cpu, no
+ * write/read ordering problems.
+ */
+
+/*
+ * TLB flush IPI:
+ *
+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
+ * 2) Leave the mm if we are in the lazy tlb mode.
+ */
+
+asmlinkage void smp_invalidate_interrupt (void)
+{
+ unsigned long cpu;
+
+ cpu = get_cpu();
+
+ if (!cpu_isset(cpu, flush_cpumask))
+ goto out;
+ /*
+ * This was a BUG() but until someone can quote me the
+ * line from the intel manual that guarantees an IPI to
+ * multiple CPUs is retried _only_ on the erroring CPUs
+ * its staying as a return
+ *
+ * BUG();
+ */
+
+ if (flush_mm == read_pda(active_mm)) {
+ if (read_pda(mmu_state) == TLBSTATE_OK) {
+ if (flush_va == FLUSH_ALL)
+ local_flush_tlb();
+ else
+ __flush_tlb_one(flush_va);
+ } else
+ leave_mm(cpu);
+ }
+ ack_APIC_irq();
+ cpu_clear(cpu, flush_cpumask);
+
+out:
+ put_cpu_no_resched();
+}
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va)
+{
+ cpumask_t tmp;
+ /*
+ * A couple of (to be removed) sanity checks:
+ *
+ * - we do not send IPIs to not-yet booted CPUs.
+ * - current CPU must not be in mask
+ * - mask must exist :)
+ */
+ BUG_ON(cpus_empty(cpumask));
+ cpus_and(tmp, cpumask, cpu_online_map);
+ BUG_ON(!cpus_equal(tmp, cpumask));
+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ if (!mm)
+ BUG();
+
+ /*
+ * I'm not happy about this global shared spinlock in the
+ * MM hot path, but we'll see how contended it is.
+ * Temporarily this turns IRQs off, so that lockups are
+ * detected by the NMI watchdog.
+ */
+ spin_lock(&tlbstate_lock);
+
+ flush_mm = mm;
+ flush_va = va;
+ cpus_or(flush_cpumask, cpumask, flush_cpumask);
+
+ /*
+ * We have to send the IPI only to
+ * CPUs affected.
+ */
+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
+
+	while (!cpus_empty(flush_cpumask))
+		mb();	/* nothing; lockup detection does not belong here */
+
+ flush_mm = NULL;
+ flush_va = 0;
+ spin_unlock(&tlbstate_lock);
+}
+
+void flush_tlb_current_task(void)
+{
+ struct mm_struct *mm = current->mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ preempt_enable();
+}
+
+void flush_tlb_mm (struct mm_struct * mm)
+{
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ if (current->active_mm == mm) {
+ if (current->mm)
+ local_flush_tlb();
+ else
+ leave_mm(smp_processor_id());
+ }
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ if (current->active_mm == mm) {
+ if(current->mm)
+ __flush_tlb_one(va);
+ else
+ leave_mm(smp_processor_id());
+ }
+
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, va);
+
+ preempt_enable();
+}
+
+static void do_flush_tlb_all(void* info)
+{
+ unsigned long cpu = smp_processor_id();
+
+ __flush_tlb_all();
+ if (read_pda(mmu_state) == TLBSTATE_LAZY)
+ leave_mm(cpu);
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+}
+
+void smp_kdb_stop(void)
+{
+ send_IPI_allbutself(KDB_VECTOR);
+}
+
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * It goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+
+void smp_send_reschedule(int cpu)
+{
+ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+}
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+static DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+ void (*func) (void *info);
+ void *info;
+ atomic_t started;
+ atomic_t finished;
+ int wait;
+};
+
+static struct call_data_struct * call_data;
+
+/*
+ * This function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+ */
+static void __smp_call_function (void (*func) (void *info), void *info,
+ int nonatomic, int wait)
+{
+ struct call_data_struct data;
+ int cpus = num_online_cpus()-1;
+
+ if (!cpus)
+ return;
+
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ call_data = &data;
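+	/* Ensure call_data is visible to other CPUs before the IPI: */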
+ wmb();
+ /* Send a message to all other CPUs and wait for them to respond */
+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+
+ /* Wait for response */
+ while (atomic_read(&data.started) != cpus)
+ cpu_relax();
+
+ if (!wait)
+ return;
+
+ while (atomic_read(&data.finished) != cpus)
+ cpu_relax();
+}
+
+/*
+ * smp_call_function - run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other
+ * CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute func, or have already executed it.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ * Actually there are a few legal cases, like panic.
+ */
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ spin_lock(&call_lock);
+ __smp_call_function(func,info,nonatomic,wait);
+ spin_unlock(&call_lock);
+ return 0;
+}
+
+void smp_stop_cpu(void)
+{
+ /*
+ * Remove this CPU:
+ */
+ cpu_clear(smp_processor_id(), cpu_online_map);
+ local_irq_disable();
+ disable_local_APIC();
+ local_irq_enable();
+}
+
+static void smp_really_stop_cpu(void *dummy)
+{
+ smp_stop_cpu();
+ for (;;)
+ asm("hlt");
+}
+
+void smp_send_stop(void)
+{
+ int nolock = 0;
+ if (reboot_force)
+ return;
+ /* Don't deadlock on the call lock in panic */
+ if (!spin_trylock(&call_lock)) {
+		/* ignore locking because we have panicked anyway */
+ nolock = 1;
+ }
+ __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
+ if (!nolock)
+ spin_unlock(&call_lock);
+ smp_stop_cpu();
+}
+
+/*
+ * Reschedule call back. Nothing to do,
+ * all the work is done automatically when
+ * we return from the interrupt.
+ */
+asmlinkage void smp_reschedule_interrupt(void)
+{
+ ack_APIC_irq();
+}
+
+asmlinkage void smp_call_function_interrupt(void)
+{
+ void (*func) (void *info) = call_data->func;
+ void *info = call_data->info;
+ int wait = call_data->wait;
+
+ ack_APIC_irq();
+ /*
+ * Notify initiating CPU that I've grabbed the data and am
+ * about to execute the function
+ */
+ mb();
+ atomic_inc(&call_data->started);
+ /*
+ * At this point the info structure may be out of scope unless wait==1
+ */
+ irq_enter();
+ (*func)(info);
+ irq_exit();
+ if (wait) {
+ mb();
+ atomic_inc(&call_data->finished);
+ }
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/smpboot.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
new file mode 100644
index 0000000000..a7e2c3e95e
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
@@ -0,0 +1,958 @@
+/*
+ * x86 SMP booting functions
+ *
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ * Copyright 2001 Andi Kleen, SuSE Labs.
+ *
+ * Much of the core SMP work is based on previous work by Thomas Radke, to
+ * whom a great many thanks are extended.
+ *
+ * Thanks to Intel for making available several different Pentium,
+ * Pentium Pro and Pentium-II/Xeon MP machines.
+ * Original development of Linux SMP code supported by Caldera.
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ *
+ * Fixes
+ * Felix Koop : NR_CPUS used properly
+ * Jose Renau : Handle single CPU case.
+ * Alan Cox : By repeated request 8) - Total BogoMIP report.
+ * Greg Wright : Fix for kernel stacks panic.
+ * Erich Boleyn : MP v1.4 and additional changes.
+ * Matthias Sattler : Changes for 2.1 kernel map.
+ * Michel Lespinasse : Changes for 2.1 kernel map.
+ * Michael Chastain : Change trampoline.S to gnu as.
+ * Alan Cox : Dumb bug: 'B' step PPro's are fine
+ * Ingo Molnar : Added APIC timers, based on code
+ * from Jose Renau
+ * Ingo Molnar : various cleanups and rewrites
+ * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs
+ * Andi Kleen : Changed for SMP boot into long mode.
+ * Rusty Russell : Hacked into shape for new "hotplug" boot process.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+#include <linux/smp_lock.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/thread_info.h>
+#include <linux/module.h>
+
+#include <linux/delay.h>
+#include <linux/mc146818rtc.h>
+#include <asm/mtrr.h>
+#include <asm/pgalloc.h>
+#include <asm/desc.h>
+#include <asm/kdebug.h>
+#include <asm/tlbflush.h>
+#include <asm/proto.h>
+
+/* Number of siblings per CPU package */
+int smp_num_siblings = 1;
+/* Package ID of each logical CPU */
+u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+EXPORT_SYMBOL(phys_proc_id);
+
+/* Bitmask of currently online CPUs */
+cpumask_t cpu_online_map;
+
+cpumask_t cpu_callin_map;
+cpumask_t cpu_callout_map;
+static cpumask_t smp_commenced_mask;
+
+/* Per CPU bogomips and other parameters */
+struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+
+/* Set when the idlers are all forked */
+int smp_threads_ready;
+
+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+
+/*
+ * Trampoline 80x86 program as an array.
+ */
+
+extern unsigned char trampoline_data [];
+extern unsigned char trampoline_end [];
+
+/*
+ * Currently trivial. Write the real->protected mode
+ * bootstrap into the page concerned. The caller
+ * has made sure it's suitably aligned.
+ */
+
+static unsigned long __init setup_trampoline(void)
+{
+ void *tramp = __va(SMP_TRAMPOLINE_BASE);
+ extern volatile __u32 tramp_gdt_ptr;
+ tramp_gdt_ptr = __pa_symbol(&cpu_gdt_table);
+ memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
+ return virt_to_phys(tramp);
+}
+
+/*
+ * The bootstrap kernel entry code has set these up. Save them for
+ * a given CPU
+ */
+
+static void __init smp_store_cpu_info(int id)
+{
+ struct cpuinfo_x86 *c = cpu_data + id;
+
+ *c = boot_cpu_data;
+ identify_cpu(c);
+}
+
+/*
+ * TSC synchronization.
+ *
+ * We first check whether all CPUs have their TSC's synchronized,
+ * then we print a warning if not, and always resync.
+ */
+
+static atomic_t tsc_start_flag = ATOMIC_INIT(0);
+static atomic_t tsc_count_start = ATOMIC_INIT(0);
+static atomic_t tsc_count_stop = ATOMIC_INIT(0);
+static unsigned long long tsc_values[NR_CPUS];
+
+#define NR_LOOPS 5
+
+extern unsigned int fast_gettimeoffset_quotient;
+
+static void __init synchronize_tsc_bp (void)
+{
+ int i;
+ unsigned long long t0;
+ unsigned long long sum, avg;
+ long long delta;
+ long one_usec;
+ int buggy = 0;
+
+ printk(KERN_INFO "checking TSC synchronization across %u CPUs: ",num_booting_cpus());
+
+ one_usec = cpu_khz;
+
+ atomic_set(&tsc_start_flag, 1);
+ wmb();
+
+ /*
+ * We loop a few times to get a primed instruction cache,
+ * then the last pass is more or less synchronized and
+ * the BP and APs set their cycle counters to zero all at
+ * once. This reduces the chance of having random offsets
+ * between the processors, and guarantees that the maximum
+ * delay between the cycle counters is never bigger than
+ * the latency of information-passing (cachelines) between
+ * two CPUs.
+ */
+ for (i = 0; i < NR_LOOPS; i++) {
+ /*
+ * all APs synchronize but they loop on '== num_cpus'
+ */
+ while (atomic_read(&tsc_count_start) != num_booting_cpus()-1) mb();
+ atomic_set(&tsc_count_stop, 0);
+ wmb();
+ /*
+ * this lets the APs save their current TSC:
+ */
+ atomic_inc(&tsc_count_start);
+
+ sync_core();
+ rdtscll(tsc_values[smp_processor_id()]);
+ /*
+ * We clear the TSC in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_tsc(0, 0);
+
+ /*
+ * Wait for all APs to leave the synchronization point:
+ */
+ while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1) mb();
+ atomic_set(&tsc_count_start, 0);
+ wmb();
+ atomic_inc(&tsc_count_stop);
+ }
+
+ sum = 0;
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_isset(i, cpu_callout_map)) {
+ t0 = tsc_values[i];
+ sum += t0;
+ }
+ }
+ avg = sum / num_booting_cpus();
+
+ sum = 0;
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+
+ delta = tsc_values[i] - avg;
+ if (delta < 0)
+ delta = -delta;
+ /*
+ * We report clock differences bigger than 2 microseconds.
+ */
+ if (delta > 2*one_usec) {
+ long realdelta;
+ if (!buggy) {
+ buggy = 1;
+ printk("\n");
+ }
+ realdelta = delta / one_usec;
+ if (tsc_values[i] < avg)
+ realdelta = -realdelta;
+
+ printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
+ i, realdelta);
+ }
+
+ sum += delta;
+ }
+ if (!buggy)
+ printk("passed.\n");
+}
+
+static void __init synchronize_tsc_ap (void)
+{
+ int i;
+
+ /*
+ * Not every cpu is online at the time
+ * this gets called, so we first wait for the BP to
+ * finish SMP initialization:
+ */
+ while (!atomic_read(&tsc_start_flag)) mb();
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ atomic_inc(&tsc_count_start);
+ while (atomic_read(&tsc_count_start) != num_booting_cpus()) mb();
+
+ sync_core();
+ rdtscll(tsc_values[smp_processor_id()]);
+ if (i == NR_LOOPS-1)
+ write_tsc(0, 0);
+
+ atomic_inc(&tsc_count_stop);
+ while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
+ }
+}
+#undef NR_LOOPS
+
+static atomic_t init_deasserted;
+
+void __init smp_callin(void)
+{
+ int cpuid, phys_id;
+ unsigned long timeout;
+
+ /*
+ * If woken up by an INIT in an 82489DX configuration
+ * we may get here before an INIT-deassert IPI reaches
+ * our local APIC. We have to wait for the IPI or we'll
+ * lock up on an APIC access.
+ */
+ while (!atomic_read(&init_deasserted));
+
+ /*
+ * (This works even if the APIC is not enabled.)
+ */
+ phys_id = GET_APIC_ID(apic_read(APIC_ID));
+ cpuid = smp_processor_id();
+ if (cpu_isset(cpuid, cpu_callin_map)) {
+ panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
+ phys_id, cpuid);
+ }
+ Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
+
+ /*
+ * STARTUP IPIs are fragile beasts as they might sometimes
+ * trigger some glue motherboard logic. Complete APIC bus
+ * silence for 1 second; this overestimates, by a factor of two,
+ * the time the boot CPU spends sending the up to 2 STARTUP IPIs.
+ * That should be enough.
+ */
+
+ /*
+ * Waiting 2s total for startup (udelay is not yet working)
+ */
+ timeout = jiffies + 2*HZ;
+ while (time_before(jiffies, timeout)) {
+ /*
+ * Has the boot CPU finished its STARTUP sequence?
+ */
+ if (cpu_isset(cpuid, cpu_callout_map))
+ break;
+ rep_nop();
+ }
+
+ if (!time_before(jiffies, timeout)) {
+ panic("smp_callin: CPU%d started up but did not get a callout!\n",
+ cpuid);
+ }
+
+ /*
+ * the boot CPU has finished the init stage and is spinning
+ * on callin_map until we finish. We are free to set up this
+ * CPU, first the APIC. (this is probably redundant on most
+ * boards)
+ */
+
+ Dprintk("CALLIN, before setup_local_APIC().\n");
+ setup_local_APIC();
+
+ local_irq_enable();
+
+ /*
+ * Get our bogomips.
+ */
+ calibrate_delay();
+ Dprintk("Stack at about %p\n",&cpuid);
+
+ disable_APIC_timer();
+
+ /*
+ * Save our processor parameters
+ */
+ smp_store_cpu_info(cpuid);
+
+ local_irq_disable();
+
+ /*
+ * Allow the master to continue.
+ */
+ cpu_set(cpuid, cpu_callin_map);
+
+ /*
+ * Synchronize the TSC with the BP
+ */
+ if (cpu_has_tsc)
+ synchronize_tsc_ap();
+}
+
+int cpucount;
+
+/*
+ * Activate a secondary processor.
+ */
+void __init start_secondary(void)
+{
+ /*
+ * Don't put anything before smp_callin(); SMP booting
+ * is so fragile that we want to limit the things done
+ * here to the bare minimum.
+ */
+ cpu_init();
+ smp_callin();
+
+ /* otherwise gcc will move smp_processor_id() up before cpu_init() */
+ barrier();
+
+ Dprintk("cpu %d: waiting for commence\n", smp_processor_id());
+ while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+ rep_nop();
+
+ Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
+ setup_secondary_APIC_clock();
+
+ Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());
+
+ if (nmi_watchdog == NMI_IO_APIC) {
+ disable_8259A_irq(0);
+ enable_NMI_through_LVT0(NULL);
+ enable_8259A_irq(0);
+ }
+
+
+ enable_APIC_timer();
+
+ /*
+ * low-memory mappings have been cleared, flush them from
+ * the local TLBs too.
+ */
+ local_flush_tlb();
+
+ Dprintk("cpu %d eSetting cpu_online_map\n", smp_processor_id());
+ cpu_set(smp_processor_id(), cpu_online_map);
+ wmb();
+
+ cpu_idle();
+}
+
+extern volatile unsigned long init_rsp;
+extern void (*initial_code)(void);
+
+#if APIC_DEBUG
+static inline void inquire_remote_apic(int apicid)
+{
+ unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
+ char *names[] = { "ID", "VERSION", "SPIV" };
+ int timeout, status;
+
+ printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
+
+ for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
+ printk("... APIC #%d %s: ", apicid, names[i]);
+
+ /*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
+
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
+ apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
+
+ timeout = 0;
+ do {
+ udelay(100);
+ status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
+ } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
+
+ switch (status) {
+ case APIC_ICR_RR_VALID:
+ status = apic_read(APIC_RRR);
+ printk("%08x\n", status);
+ break;
+ default:
+ printk("failed\n");
+ }
+ }
+}
+#endif
+
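+/*
+ * Bring a secondary CPU out of reset with the standard INIT/SIPI
+ * sequence: assert and then deassert INIT on the target, and, if the
+ * target has an integrated APIC, follow with up to two STARTUP IPIs
+ * whose vector field encodes the 4K-aligned page (start_rip >> 12)
+ * where the CPU begins executing in real mode.
+ */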
+static int __init wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
+{
+ unsigned long send_status = 0, accept_status = 0;
+ int maxlvt, timeout, num_starts, j;
+
+ Dprintk("Asserting INIT.\n");
+
+ /*
+ * Turn INIT on target chip
+ */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+
+ /*
+ * Send IPI
+ */
+ apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
+ | APIC_DM_INIT);
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
+
+ mdelay(10);
+
+ Dprintk("Deasserting INIT.\n");
+
+ /* Target chip */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+
+ /* Send IPI */
+ apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
+
+ atomic_set(&init_deasserted, 1);
+
+ /*
+ * Should we send STARTUP IPIs ?
+ *
+ * Determine this based on the APIC version.
+ * If we don't have an integrated APIC, don't send the STARTUP IPIs.
+ */
+ if (APIC_INTEGRATED(apic_version[phys_apicid]))
+ num_starts = 2;
+ else
+ num_starts = 0;
+
+ /*
+ * Run STARTUP IPI loop.
+ */
+ Dprintk("#startup loops: %d.\n", num_starts);
+
+ maxlvt = get_maxlvt();
+
+ for (j = 1; j <= num_starts; j++) {
+ Dprintk("Sending STARTUP #%d.\n",j);
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ apic_read(APIC_ESR);
+ Dprintk("After apic_write.\n");
+
+ /*
+ * STARTUP IPI
+ */
+
+ /* Target chip */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+
+ /* Boot on the stack */
+ /* Kick the second */
+ apic_write_around(APIC_ICR, APIC_DM_STARTUP
+ | (start_rip >> 12));
+
+ /*
+ * Give the other CPU some time to accept the IPI.
+ */
+ udelay(300);
+
+ Dprintk("Startup point 1.\n");
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
+
+ /*
+ * Give the other CPU some time to accept the IPI.
+ */
+ udelay(200);
+ /*
+ * Due to the Pentium erratum 3AP.
+ */
+ if (maxlvt > 3) {
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ }
+ accept_status = (apic_read(APIC_ESR) & 0xEF);
+ if (send_status || accept_status)
+ break;
+ }
+ Dprintk("After Startup.\n");
+
+ if (send_status)
+ printk(KERN_ERR "APIC never delivered???\n");
+ if (accept_status)
+ printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
+
+ return (send_status | accept_status);
+}
+
+static void __init do_boot_cpu (int apicid)
+{
+ struct task_struct *idle;
+ unsigned long boot_error;
+ int timeout, cpu;
+ unsigned long start_rip;
+
+ cpu = ++cpucount;
+ /*
+ * We can't use kernel_thread() since we must avoid
+ * rescheduling the child.
+ */
+ idle = fork_idle(cpu);
+ if (IS_ERR(idle))
+ panic("failed fork for CPU %d", cpu);
+ x86_cpu_to_apicid[cpu] = apicid;
+
+ cpu_pda[cpu].pcurrent = idle;
+
+ start_rip = setup_trampoline();
+
+ init_rsp = idle->thread.rsp;
+ per_cpu(init_tss,cpu).rsp0 = init_rsp;
+ initial_code = start_secondary;
+ clear_ti_thread_flag(idle->thread_info, TIF_FORK);
+
+ printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
+ start_rip, init_rsp);
+
+ /*
+ * This grunge runs the startup process for
+ * the targeted processor.
+ */
+
+ atomic_set(&init_deasserted, 0);
+
+ Dprintk("Setting warm reset code and vector.\n");
+
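+ /*
+ * Shutdown-status byte 0xA in CMOS register 0xF tells the BIOS to
+ * take the warm-reset path after INIT and jump via the vector at
+ * 0x40:0x67 (segment stored at 0x469, offset at 0x467), which the
+ * two stores below point at the trampoline.
+ */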
+ CMOS_WRITE(0xa, 0xf);
+ local_flush_tlb();
+ Dprintk("1.\n");
+ *((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
+ Dprintk("2.\n");
+ *((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
+ Dprintk("3.\n");
+
+ /*
+ * Be paranoid about clearing APIC errors.
+ */
+ if (APIC_INTEGRATED(apic_version[apicid])) {
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ apic_read(APIC_ESR);
+ }
+
+ /*
+ * Status is now clean
+ */
+ boot_error = 0;
+
+ /*
+ * Starting actual IPI sequence...
+ */
+ boot_error = wakeup_secondary_via_INIT(apicid, start_rip);
+
+ if (!boot_error) {
+ /*
+ * allow APs to start initializing.
+ */
+ Dprintk("Before Callout %d.\n", cpu);
+ cpu_set(cpu, cpu_callout_map);
+ Dprintk("After Callout %d.\n", cpu);
+
+ /*
+ * Wait 5s total for a response
+ */
+ for (timeout = 0; timeout < 50000; timeout++) {
+ if (cpu_isset(cpu, cpu_callin_map))
+ break; /* It has booted */
+ udelay(100);
+ }
+
+ if (cpu_isset(cpu, cpu_callin_map)) {
+ /* number CPUs logically, starting from 1 (BSP is 0) */
+ Dprintk("OK.\n");
+ print_cpu_info(&cpu_data[cpu]);
+ Dprintk("CPU has booted.\n");
+ } else {
+ boot_error = 1;
+ if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
+ == 0xA5)
+ /* trampoline started but...? */
+ printk("Stuck ??\n");
+ else
+ /* trampoline code not run */
+ printk("Not responding.\n");
+#if APIC_DEBUG
+ inquire_remote_apic(apicid);
+#endif
+ }
+ }
+ if (boot_error) {
+ cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
+ clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
+ cpucount--;
+ x86_cpu_to_apicid[cpu] = BAD_APICID;
+ x86_cpu_to_log_apicid[cpu] = BAD_APICID;
+ }
+}
+
+cycles_t cacheflush_time;
+unsigned long cache_decay_ticks;
+
+static void smp_tune_scheduling (void)
+{
+ int cachesize; /* kB */
+ unsigned long bandwidth = 1000; /* MB/s */
+ /*
+ * Rough estimation for SMP scheduling, this is the number of
+ * cycles it takes for a fully memory-limited process to flush
+ * the SMP-local cache.
+ *
+ * (For a P5 this pretty much means we will choose another idle
+ * CPU almost always at wakeup time (this is due to the small
+ * L1 cache), on PIIs it's around 50-100 usecs, depending on
+ * the cache size)
+ */
+
+ if (!cpu_khz) {
+ /*
+ * this basically disables processor-affinity
+ * scheduling on SMP without a TSC.
+ */
+ cacheflush_time = 0;
+ return;
+ } else {
+ cachesize = boot_cpu_data.x86_cache_size;
+ if (cachesize == -1) {
+ cachesize = 16; /* Pentiums, 2x8kB cache */
+ bandwidth = 100;
+ }
+
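+ /*
+ * Units: (cachesize << 10) is bytes; bandwidth in MB/s is roughly
+ * bytes per usec, so the quotient is usecs; multiplying by
+ * cpu_khz >> 10 (approximately cycles per usec) gives cycles.
+ */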
+ cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
+ }
+
+ cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
+
+ printk(KERN_INFO "per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
+ (long)cacheflush_time/(cpu_khz/1000),
+ ((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
+ printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
+ (cache_decay_ticks + 1) * 1000 / HZ);
+}
+
+/*
+ * Cycle through the processors sending APIC IPIs to boot each.
+ */
+
+static void __init smp_boot_cpus(unsigned int max_cpus)
+{
+ unsigned apicid, cpu, bit, kicked;
+
+ nmi_watchdog_default();
+
+ /*
+ * Setup boot CPU information
+ */
+ smp_store_cpu_info(0); /* Final full version of the data */
+ printk(KERN_INFO "CPU%d: ", 0);
+ print_cpu_info(&cpu_data[0]);
+
+ current_thread_info()->cpu = 0;
+ smp_tune_scheduling();
+
+ if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
+ printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
+ hard_smp_processor_id());
+ physid_set(hard_smp_processor_id(), phys_cpu_present_map);
+ }
+
+ /*
+ * If we couldn't find an SMP configuration at boot time,
+ * get out of here now!
+ */
+ if (!smp_found_config) {
+ printk(KERN_NOTICE "SMP motherboard not detected.\n");
+ io_apic_irqs = 0;
+ cpu_online_map = cpumask_of_cpu(0);
+ phys_cpu_present_map = physid_mask_of_physid(0);
+ if (APIC_init_uniprocessor())
+ printk(KERN_NOTICE "Local APIC not detected."
+ " Using dummy APIC emulation.\n");
+ goto smp_done;
+ }
+
+ /*
+ * Should not be necessary because the MP table should list the boot
+ * CPU too, but we do it for the sake of robustness anyway.
+ */
+ if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
+ printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
+ boot_cpu_id);
+ physid_set(hard_smp_processor_id(), phys_cpu_present_map);
+ }
+
+ /*
+ * If we couldn't find a local APIC, then get out of here now!
+ */
+ if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
+ printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
+ boot_cpu_id);
+ printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
+ io_apic_irqs = 0;
+ cpu_online_map = cpumask_of_cpu(0);
+ phys_cpu_present_map = physid_mask_of_physid(0);
+ disable_apic = 1;
+ goto smp_done;
+ }
+
+ verify_local_APIC();
+
+ /*
+ * If SMP should be disabled, then really disable it!
+ */
+ if (!max_cpus) {
+ smp_found_config = 0;
+ printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
+ io_apic_irqs = 0;
+ cpu_online_map = cpumask_of_cpu(0);
+ phys_cpu_present_map = physid_mask_of_physid(0);
+ disable_apic = 1;
+ goto smp_done;
+ }
+
+ connect_bsp_APIC();
+ setup_local_APIC();
+
+ if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id)
+ BUG();
+
+ x86_cpu_to_apicid[0] = boot_cpu_id;
+
+ /*
+ * Now scan the CPU present map and fire up the other CPUs.
+ */
+ Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
+
+ kicked = 1;
+ for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
+ apicid = cpu_present_to_apicid(bit);
+ /*
+ * Don't even attempt to start the boot CPU!
+ */
+ if (apicid == boot_cpu_id || (apicid == BAD_APICID))
+ continue;
+
+ if (!physid_isset(apicid, phys_cpu_present_map))
+ continue;
+ if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
+ continue;
+
+ do_boot_cpu(apicid);
+ ++kicked;
+ }
+
+ /*
+ * Cleanup possible dangling ends...
+ */
+ {
+ /*
+ * Install writable page 0 entry to set BIOS data area.
+ */
+ local_flush_tlb();
+
+ /*
+ * Paranoid: Set warm reset code and vector here back
+ * to default values.
+ */
+ CMOS_WRITE(0, 0xf);
+
+ *((volatile int *) phys_to_virt(0x467)) = 0;
+ }
+
+ /*
+ * Allow the user to impress friends.
+ */
+
+ Dprintk("Before bogomips.\n");
+ if (!cpucount) {
+ printk(KERN_INFO "Only one processor found.\n");
+ } else {
+ unsigned long bogosum = 0;
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ if (cpu_isset(cpu, cpu_callout_map))
+ bogosum += cpu_data[cpu].loops_per_jiffy;
+ printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+ cpucount+1,
+ bogosum/(500000/HZ),
+ (bogosum/(5000/HZ))%100);
+ Dprintk("Before bogocount - setting activated=1.\n");
+ }
+
+ /*
+ * Construct cpu_sibling_map[], so that we can tell the
+ * sibling CPU efficiently.
+ */
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ cpus_clear(cpu_sibling_map[cpu]);
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ int siblings = 0;
+ int i;
+ if (!cpu_isset(cpu, cpu_callout_map))
+ continue;
+
+ if (smp_num_siblings > 1) {
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+ if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ siblings++;
+ cpu_set(i, cpu_sibling_map[cpu]);
+ }
+ }
+ } else {
+ siblings++;
+ cpu_set(cpu, cpu_sibling_map[cpu]);
+ }
+
+ if (siblings != smp_num_siblings) {
+ printk(KERN_WARNING
+ "WARNING: %d siblings found for CPU%d, should be %d\n",
+ siblings, cpu, smp_num_siblings);
+ smp_num_siblings = siblings;
+ }
+ }
+
+ Dprintk("Boot done.\n");
+
+ /*
+ * Here we can be sure that there is an IO-APIC in the system. Let's
+ * go and set it up:
+ */
+ if (!skip_ioapic_setup && nr_ioapics)
+ setup_IO_APIC();
+ else
+ nr_ioapics = 0;
+
+ setup_boot_APIC_clock();
+
+ /*
+ * Synchronize the TSC with the AP
+ */
+ if (cpu_has_tsc && cpucount)
+ synchronize_tsc_bp();
+
+ smp_done:
+ time_init_smp();
+}
+
+/* These are wrappers to interface to the new boot process. Someone
+ who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ smp_boot_cpus(max_cpus);
+}
+
+void __devinit smp_prepare_boot_cpu(void)
+{
+ cpu_set(smp_processor_id(), cpu_online_map);
+ cpu_set(smp_processor_id(), cpu_callout_map);
+}
+
+int __devinit __cpu_up(unsigned int cpu)
+{
+ /* This only works at boot for x86. See "rewrite" above. */
+ if (cpu_isset(cpu, smp_commenced_mask)) {
+ local_irq_enable();
+ return -ENOSYS;
+ }
+
+ /* In case one didn't come up */
+ if (!cpu_isset(cpu, cpu_callin_map)) {
+ local_irq_enable();
+ return -EIO;
+ }
+ local_irq_enable();
+
+ /* Unleash the CPU! */
+ Dprintk("waiting for cpu %d\n", cpu);
+
+ cpu_set(cpu, smp_commenced_mask);
+ while (!cpu_isset(cpu, cpu_online_map))
+ mb();
+ return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+#ifdef CONFIG_X86_IO_APIC
+ setup_ioapic_dest();
+#endif
+ zap_low_mappings();
+}
+
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/traps.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/traps.c
new file mode 100644
index 0000000000..3800567bb5
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/traps.c
@@ -0,0 +1,974 @@
+/*
+ * linux/arch/x86-64/traps.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/debugreg.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
+#include <asm/kdebug.h>
+#include <asm/processor.h>
+
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/pda.h>
+#include <asm/proto.h>
+#include <asm/nmi.h>
+
+#include <linux/irq.h>
+
+
+extern struct gate_struct idt_table[256];
+
+asmlinkage void divide_error(void);
+asmlinkage void debug(void);
+asmlinkage void nmi(void);
+asmlinkage void int3(void);
+asmlinkage void overflow(void);
+asmlinkage void bounds(void);
+asmlinkage void invalid_op(void);
+asmlinkage void device_not_available(void);
+asmlinkage void double_fault(void);
+asmlinkage void coprocessor_segment_overrun(void);
+asmlinkage void invalid_TSS(void);
+asmlinkage void segment_not_present(void);
+asmlinkage void stack_segment(void);
+asmlinkage void general_protection(void);
+asmlinkage void page_fault(void);
+asmlinkage void coprocessor_error(void);
+asmlinkage void simd_coprocessor_error(void);
+asmlinkage void reserved(void);
+asmlinkage void alignment_check(void);
+asmlinkage void machine_check(void);
+asmlinkage void spurious_interrupt_bug(void);
+asmlinkage void call_debug(void);
+
+struct notifier_block *die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+ int err = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&die_notifier_lock, flags);
+ err = notifier_chain_register(&die_chain, nb);
+ spin_unlock_irqrestore(&die_notifier_lock, flags);
+ return err;
+}
+
+static inline void conditional_sti(struct pt_regs *regs)
+{
+ if (regs->eflags & X86_EFLAGS_IF)
+ local_irq_enable();
+}
+
+static int kstack_depth_to_print = 10;
+
+#ifdef CONFIG_KALLSYMS
+#include <linux/kallsyms.h>
+int printk_address(unsigned long address)
+{
+ unsigned long offset = 0, symsize;
+ const char *symname;
+ char *modname;
+ char *delim = ":";
+ char namebuf[128];
+
+ symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
+ if (!symname)
+ return printk("[<%016lx>]", address);
+ if (!modname)
+ modname = delim = "";
+ return printk("<%016lx>{%s%s%s%s%+ld}",
+ address,delim,modname,delim,symname,offset);
+}
+#else
+int printk_address(unsigned long address)
+{
+ return printk("[<%016lx>]", address);
+}
+#endif
+
+unsigned long *in_exception_stack(int cpu, unsigned long stack)
+{
+ int k;
+ for (k = 0; k < N_EXCEPTION_STACKS; k++) {
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ unsigned long end = tss->ist[k] + EXCEPTION_STKSZ;
+
+ if (stack >= tss->ist[k] && stack <= end)
+ return (unsigned long *)end;
+ }
+ return NULL;
+}
+
+/*
+ * x86-64 can have up to three kernel stacks:
+ * process stack
+ * interrupt stack
+ * severe exception (double fault, nmi, stack fault) hardware stack
+ * Check and process them in order.
+ */
+
+void show_trace(unsigned long *stack)
+{
+ unsigned long addr;
+ unsigned long *irqstack, *irqstack_end, *estack_end;
+ const int cpu = safe_smp_processor_id();
+ int i;
+
+ printk("\nCall Trace:");
+ i = 0;
+
+ estack_end = in_exception_stack(cpu, (unsigned long)stack);
+ if (estack_end) {
+ while (stack < estack_end) {
+ addr = *stack++;
+ if (__kernel_text_address(addr)) {
+ i += printk_address(addr);
+ i += printk(" ");
+ if (i > 50) {
+ printk("\n");
+ i = 0;
+ }
+ }
+ }
+ i += printk(" <EOE> ");
+ i += 7;
+ stack = (unsigned long *) estack_end[-2];
+ }
+
+ irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
+ irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE + 64);
+
+ if (stack >= irqstack && stack < irqstack_end) {
+ printk("<IRQ> ");
+ while (stack < irqstack_end) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (__kernel_text_address(addr)) {
+ i += printk_address(addr);
+ i += printk(" ");
+ if (i > 50) {
+ printk("\n ");
+ i = 0;
+ }
+ }
+ }
+ stack = (unsigned long *) (irqstack_end[-1]);
+ printk(" <EOI> ");
+ i += 7;
+ }
+
+ while (((long) stack & (THREAD_SIZE-1)) != 0) {
+ addr = *stack++;
+ if (__kernel_text_address(addr)) {
+ i += printk_address(addr);
+ i += printk(" ");
+ if (i > 50) {
+ printk("\n ");
+ i = 0;
+ }
+ }
+ }
+ printk("\n");
+}
+
+void show_stack(struct task_struct *tsk, unsigned long * rsp)
+{
+ unsigned long *stack;
+ int i;
+ const int cpu = safe_smp_processor_id();
+ unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
+ unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE);
+
+ /*
+ * Debugging aid: "show_stack(NULL, NULL);" prints the
+ * back trace for this cpu.
+ */
+
+ if (rsp == NULL) {
+ if (tsk)
+ rsp = (unsigned long *)tsk->thread.rsp;
+ else
+ rsp = (unsigned long *)&rsp;
+ }
+
+ stack = rsp;
+ for(i=0; i < kstack_depth_to_print; i++) {
+ if (stack >= irqstack && stack <= irqstack_end) {
+ if (stack == irqstack_end) {
+ stack = (unsigned long *) (irqstack_end[-1]);
+ printk(" <EOI> ");
+ }
+ } else {
+ if (((long) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ }
+ if (i && ((i % 4) == 0))
+ printk("\n ");
+ printk("%016lx ", *stack++);
+ }
+ show_trace((unsigned long *)rsp);
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+ unsigned long dummy;
+ show_trace(&dummy);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void show_registers(struct pt_regs *regs)
+{
+ int i;
+ int in_kernel = (regs->cs & 3) == 0;
+ unsigned long rsp;
+ const int cpu = safe_smp_processor_id();
+ struct task_struct *cur = cpu_pda[cpu].pcurrent;
+
+ rsp = regs->rsp;
+
+ printk("CPU %d ", cpu);
+ __show_regs(regs);
+ printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
+ cur->comm, cur->pid, cur->thread_info, cur);
+
+ /*
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+ if (in_kernel) {
+
+ printk("Stack: ");
+ show_stack(NULL, (unsigned long*)rsp);
+
+ printk("\nCode: ");
+ if(regs->rip < PAGE_OFFSET)
+ goto bad;
+
+ for(i=0;i<20;i++)
+ {
+ unsigned char c;
+ if(__get_user(c, &((unsigned char*)regs->rip)[i])) {
+bad:
+ printk(" Bad RIP value.");
+ break;
+ }
+ printk("%02x ", c);
+ }
+ }
+ printk("\n");
+}
+
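+
+/*
+ * BUG() plants a ud2 instruction (opcode 0x0f 0x0b) followed by an
+ * inline struct bug_frame carrying the file name and line number.
+ * Verify that the faulting rip really points at such a frame before
+ * reporting it.
+ */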
+void handle_BUG(struct pt_regs *regs)
+{
+ struct bug_frame f;
+ char tmp;
+
+ if (regs->cs & 3)
+ return;
+ if (__copy_from_user(&f, (struct bug_frame *) regs->rip,
+ sizeof(struct bug_frame)))
+ return;
+ if ((unsigned long)f.filename < __PAGE_OFFSET ||
+ f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
+ return;
+ if (__get_user(tmp, f.filename))
+ f.filename = "unmapped filename";
+ printk("----------- [cut here ] --------- [please bite here ] ---------\n");
+ printk(KERN_ALERT "Kernel BUG at %.50s:%d\n", f.filename, f.line);
+}
+
+void out_of_line_bug(void)
+{
+ BUG();
+}
+
+static DEFINE_SPINLOCK(die_lock);
+static int die_owner = -1;
+
+void oops_begin(void)
+{
+ int cpu = safe_smp_processor_id();
+ /* racy, but better than risking deadlock. */
+ local_irq_disable();
+ if (!spin_trylock(&die_lock)) {
+ if (cpu == die_owner)
+ /* nested oops. should stop eventually */;
+ else
+ spin_lock(&die_lock);
+ }
+ die_owner = cpu;
+ console_verbose();
+ bust_spinlocks(1);
+}
+
+void oops_end(void)
+{
+ die_owner = -1;
+ bust_spinlocks(0);
+ spin_unlock(&die_lock);
+ local_irq_enable(); /* make sure back scroll still works */
+ if (panic_on_oops)
+ panic("Oops");
+}
+
+void __die(const char * str, struct pt_regs * regs, long err)
+{
+ static int die_counter;
+ printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
+#ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+ printk("SMP ");
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("DEBUG_PAGEALLOC");
+#endif
+ printk("\n");
+ notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
+ show_registers(regs);
+ /* Executive summary in case the oops scrolled away */
+ printk(KERN_ALERT "RIP ");
+ printk_address(regs->rip);
+ printk(" RSP <%016lx>\n", regs->rsp);
+}
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ oops_begin();
+ handle_BUG(regs);
+ __die(str, regs, err);
+ oops_end();
+ do_exit(SIGSEGV);
+}
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+ if (!(regs->eflags & VM_MASK) && (regs->cs == __KERNEL_CS))
+ die(str, regs, err);
+}
+
+#if 0
+void die_nmi(char *str, struct pt_regs *regs)
+{
+ oops_begin();
+ /*
+ * We are in trouble anyway; let's at least try
+ * to get a message out.
+ */
+ printk(str, safe_smp_processor_id());
+ show_registers(regs);
+ if (panic_on_timeout || panic_on_oops)
+ panic("nmi watchdog");
+ printk("console shuts up ...\n");
+ oops_end();
+ do_exit(SIGSEGV);
+}
+#endif
+
+static void do_trap(int trapnr, int signr, char *str,
+ struct pt_regs * regs, long error_code, siginfo_t *info)
+{
+ conditional_sti(regs);
+
+#ifdef CONFIG_CHECKING
+ {
+ unsigned long gs;
+ struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
+ rdmsrl(MSR_GS_BASE, gs);
+ if (gs != (unsigned long)pda) {
+ wrmsrl(MSR_GS_BASE, pda);
+ printk("%s: wrong gs %lx expected %p rip %lx\n", str, gs, pda,
+ regs->rip);
+ }
+ }
+#endif
+
+ if ((regs->cs & 3) != 0) {
+ struct task_struct *tsk = current;
+
+ if (exception_trace && unhandled_signal(tsk, signr))
+ printk(KERN_INFO
+ "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
+ tsk->comm, tsk->pid, str,
+ regs->rip,regs->rsp,error_code);
+
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+ if (info)
+ force_sig_info(signr, info, tsk);
+ else
+ force_sig(signr, tsk);
+ return;
+ }
+
+
+ /* kernel trap */
+ {
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->rip);
+ if (fixup) {
+ regs->rip = fixup->fixup;
+ } else
+ die(str, regs, error_code);
+ return;
+ }
+}
+
+#define DO_ERROR(trapnr, signr, str, name) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, regs, error_code, NULL); \
+}
+
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ siginfo_t info; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void __user *)siaddr; \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, regs, error_code, &info); \
+}
+
+DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
+DO_ERROR( 4, SIGSEGV, "overflow", overflow)
+DO_ERROR( 5, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->rip)
+DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+DO_ERROR(18, SIGSEGV, "reserved", reserved)
+
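+/*
+ * Stack-segment and double faults arrive on a separate IST exception
+ * stack. For traps taken from user mode the macro first copies the
+ * saved registers to the top of the process stack so the common
+ * do_trap() path sees pt_regs in the usual place, and returns the
+ * (possibly relocated) regs pointer to the entry code.
+ */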
+#define DO_ERROR_STACK(trapnr, signr, str, name) \
+asmlinkage void *do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ struct pt_regs *pr = ((struct pt_regs *)(current->thread.rsp0))-1; \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return regs; \
+ if (regs->cs & 3) { \
+ memcpy(pr, regs, sizeof(struct pt_regs)); \
+ regs = pr; \
+ } \
+ do_trap(trapnr, signr, str, regs, error_code, NULL); \
+ return regs; \
+}
+
+DO_ERROR_STACK(12, SIGBUS, "stack segment", stack_segment)
+DO_ERROR_STACK( 8, SIGSEGV, "double fault", double_fault)
+
+asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
+{
+ conditional_sti(regs);
+
+#ifdef CONFIG_CHECKING
+ {
+ unsigned long gs;
+ struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
+ rdmsrl(MSR_GS_BASE, gs);
+ if (gs != (unsigned long)pda) {
+ wrmsrl(MSR_GS_BASE, pda);
+ oops_in_progress++;
+ printk("general protection handler: wrong gs %lx expected %p\n", gs, pda);
+ oops_in_progress--;
+ }
+ }
+#endif
+
+ if ((regs->cs & 3)!=0) {
+ struct task_struct *tsk = current;
+
+ if (exception_trace && unhandled_signal(tsk, SIGSEGV))
+ printk(KERN_INFO
+ "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
+ tsk->comm, tsk->pid,
+ regs->rip,regs->rsp,error_code);
+
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 13;
+ force_sig(SIGSEGV, tsk);
+ return;
+ }
+
+ /* kernel gp */
+ {
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->rip);
+ if (fixup) {
+ regs->rip = fixup->fixup;
+ return;
+ }
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ return;
+ die("general protection fault", regs, error_code);
+ }
+}
+
+#if 0
+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+{
+ printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
+ printk("You probably have a hardware problem with your RAM chips\n");
+
+ /* Clear and disable the memory parity error line. */
+ reason = (reason & 0xf) | 4;
+ outb(reason, 0x61);
+}
+
+static void io_check_error(unsigned char reason, struct pt_regs * regs)
+{
+ printk("NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
+ /* Re-enable the IOCK line, wait for a few seconds */
+ reason = (reason & 0xf) | 8;
+ outb(reason, 0x61);
+ mdelay(2000);
+ reason &= ~8;
+ outb(reason, 0x61);
+}
+
+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+{ printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
+ printk("Dazed and confused, but trying to continue\n");
+ printk("Do you have a strange power saving mode enabled?\n");
+}
+
+asmlinkage void default_do_nmi(struct pt_regs *regs)
+{
+ unsigned char reason = 0;
+
+ /* Only the BSP gets external NMIs from the system. */
+ if (!smp_processor_id())
+ reason = get_nmi_reason();
+
+ if (!(reason & 0xc0)) {
+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
+ == NOTIFY_STOP)
+ return;
+#ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * Ok, so this is none of the documented NMI sources,
+ * so it must be the NMI watchdog.
+ */
+ if (nmi_watchdog > 0) {
+ nmi_watchdog_tick(regs,reason);
+ return;
+ }
+#endif
+ unknown_nmi_error(reason, regs);
+ return;
+ }
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
+ return;
+
+ /* AK: following checks seem to be broken on modern chipsets. FIXME */
+
+ if (reason & 0x80)
+ mem_parity_error(reason, regs);
+ if (reason & 0x40)
+ io_check_error(reason, regs);
+
+ /*
+ * Reassert NMI in case it became active meanwhile
+ * as it's edge-triggered.
+ */
+ outb(0x8f, 0x70);
+ inb(0x71); /* dummy */
+ outb(0x0f, 0x70);
+ inb(0x71); /* dummy */
+}
+#endif
+
+asmlinkage void do_int3(struct pt_regs * regs, long error_code)
+{
+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
+ return;
+ }
+ do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
+ return;
+}
+
+/* runs on IST stack. */
+asmlinkage void *do_debug(struct pt_regs * regs, unsigned long error_code)
+{
+ struct pt_regs *pr;
+ unsigned long condition;
+ struct task_struct *tsk = current;
+ siginfo_t info;
+
+ pr = (struct pt_regs *)(current->thread.rsp0)-1;
+ if (regs->cs & 3) {
+ memcpy(pr, regs, sizeof(struct pt_regs));
+ regs = pr;
+ }
+
+#ifdef CONFIG_CHECKING
+ {
+ /* RED-PEN interaction with debugger - could destroy gs */
+ unsigned long gs;
+ struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
+ rdmsrl(MSR_GS_BASE, gs);
+ if (gs != (unsigned long)pda) {
+ wrmsrl(MSR_GS_BASE, pda);
+ printk("debug handler: wrong gs %lx expected %p\n", gs, pda);
+ }
+ }
+#endif
+
+ asm("movq %%db6,%0" : "=r" (condition));
+
+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
+ SIGTRAP) == NOTIFY_STOP) {
+ return regs;
+ }
+ conditional_sti(regs);
+
+ /* Mask out spurious debug traps due to lazy DR7 setting */
+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
+ if (!tsk->thread.debugreg7) {
+ goto clear_dr7;
+ }
+ }
+
+ tsk->thread.debugreg6 = condition;
+
+ /* Mask out spurious TF errors due to lazy TF clearing */
+ if ((condition & DR_STEP) &&
+ (notify_die(DIE_DEBUGSTEP, "debugstep", regs, condition,
+ 1, SIGTRAP) != NOTIFY_STOP)) {
+ /*
+ * The TF error should be masked out only if the current
+ * process is not traced and if the TRAP flag has been set
+ * previously by a tracing process (condition detected by
+ * the PT_DTRACE flag); remember that the i386 TRAP flag
+ * can be modified by the process itself in user mode,
+ * allowing programs to debug themselves without the ptrace()
+ * interface.
+ */
+ if ((regs->cs & 3) == 0)
+ goto clear_TF_reenable;
+ if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
+ goto clear_TF;
+ }
+
+ /* Ok, finally something we can handle */
+ tsk->thread.trap_no = 1;
+ tsk->thread.error_code = error_code;
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+ if ((regs->cs & 3) == 0)
+ goto clear_dr7;
+
+ info.si_addr = (void __user *)regs->rip;
+ force_sig_info(SIGTRAP, &info, tsk);
+clear_dr7:
+ asm volatile("movq %0,%%db7"::"r"(0UL));
+ notify_die(DIE_DEBUG, "debug", regs, condition, 1, SIGTRAP);
+ return regs;
+
+clear_TF_reenable:
+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+
+clear_TF:
+ /* RED-PEN could cause spurious errors */
+ if (notify_die(DIE_DEBUG, "debug2", regs, condition, 1, SIGTRAP)
+ != NOTIFY_STOP)
+ regs->eflags &= ~TF_MASK;
+ return regs;
+}
+
+static int kernel_math_error(struct pt_regs *regs, char *str)
+{
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->rip);
+ if (fixup) {
+ regs->rip = fixup->fixup;
+ return 1;
+ }
+ notify_die(DIE_GPF, str, regs, 0, 16, SIGFPE);
+#if 0
+ /* This should be a die, but warn only for now */
+ die(str, regs, 0);
+#else
+ printk(KERN_DEBUG "%s: %s at ", current->comm, str);
+ printk_address(regs->rip);
+ printk("\n");
+#endif
+ return 0;
+}
+
+/*
+ * Note that we play around with the 'TS' bit in an attempt to get
+ * the correct behaviour even in the presence of the asynchronous
+ * IRQ13 behaviour.
+ */
+asmlinkage void do_coprocessor_error(struct pt_regs *regs)
+{
+ void __user *rip = (void __user *)(regs->rip);
+ struct task_struct * task;
+ siginfo_t info;
+ unsigned short cwd, swd;
+
+ conditional_sti(regs);
+ if ((regs->cs & 3) == 0 &&
+ kernel_math_error(regs, "kernel x87 math error"))
+ return;
+
+ /*
+ * Save the info for the exception handler and clear the error.
+ */
+ task = current;
+ save_init_fpu(task);
+ task->thread.trap_no = 16;
+ task->thread.error_code = 0;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = rip;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ * status. 0x3f is the exception bits in these regs, 0x200 is the
+ * C1 reg you need in case of a stack fault, 0x040 is the stack
+ * fault bit. We should only be taking one exception at a time,
+ * so if this combination doesn't produce any single exception,
+ * then we have a bad program that isn't synchronizing its FPU usage
+ * and it will suffer the consequences since we won't be able to
+ * fully reproduce the context of the exception.
+ */
+ cwd = get_fpu_cwd(task);
+ swd = get_fpu_swd(task);
+ switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ case 0x041: /* Stack Fault */
+ case 0x241: /* Stack Fault | Direction */
+ info.si_code = FPE_FLTINV;
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
+}
+
+asmlinkage void bad_intr(void)
+{
+ printk("bad interrupt");
+}
+
+asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
+{
+ void __user *rip = (void __user *)(regs->rip);
+ struct task_struct * task;
+ siginfo_t info;
+ unsigned short mxcsr;
+
+ conditional_sti(regs);
+ if ((regs->cs & 3) == 0 &&
+ kernel_math_error(regs, "simd math error"))
+ return;
+
+ /*
+ * Save the info for the exception handler and clear the error.
+ */
+ task = current;
+ save_init_fpu(task);
+ task->thread.trap_no = 19;
+ task->thread.error_code = 0;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = rip;
+ /*
+ * The SIMD FPU exceptions are handled a little differently, as there
+ * is only a single status/control register. Thus, to determine which
+ * unmasked exception was caught we must mask the exception mask bits
+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+ */
+ mxcsr = get_fpu_mxcsr(task);
+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ info.si_code = FPE_FLTINV;
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
+}
+
+asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
+{
+}
+
+#if 0
+asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+{
+}
+#endif
+
+/*
+ * 'math_state_restore()' saves the current math information in the
+ * old math state array, and gets the new ones from the current task
+ *
+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
+ * Don't touch unless you *really* know how it works.
+ */
+asmlinkage void math_state_restore(void)
+{
+ struct task_struct *me = current;
+
+ /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
+
+ if (!used_math())
+ init_fpu(me);
+ restore_fpu_checking(&me->thread.i387.fxsave);
+ me->thread_info->status |= TS_USEDFPU;
+}
+
+void do_call_debug(struct pt_regs *regs)
+{
+ notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
+}
+
+
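+/*
+ * Under Xen the guest kernel does not write the IDT itself; it hands
+ * this table to the hypervisor via HYPERVISOR_set_trap_table() in
+ * trap_init() below. Each entry appears to hold the vector number, the
+ * least-privileged ring allowed to raise it (3 for int3/overflow/bounds
+ * and the syscall vector so user space may use them), the kernel code
+ * selector, a flags word, and the handler address; an all-zero entry
+ * terminates the table.
+ */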
+static trap_info_t trap_table[] = {
+ { 0, 0, (__KERNEL_CS|0x3), 0, (unsigned long)divide_error },
+ { 1, 0, (__KERNEL_CS|0x3), 0, (unsigned long)debug },
+ { 3, 3, (__KERNEL_CS|0x3), 0, (unsigned long)int3 },
+ { 4, 3, (__KERNEL_CS|0x3), 0, (unsigned long)overflow },
+ { 5, 3, (__KERNEL_CS|0x3), 0, (unsigned long)bounds },
+ { 6, 0, (__KERNEL_CS|0x3), 0, (unsigned long)invalid_op },
+ { 7, 0, (__KERNEL_CS|0x3), 0, (unsigned long)device_not_available },
+ { 9, 0, (__KERNEL_CS|0x3), 0, (unsigned long)coprocessor_segment_overrun},
+ { 10, 0, (__KERNEL_CS|0x3), 0, (unsigned long)invalid_TSS },
+ { 11, 0, (__KERNEL_CS|0x3), 0, (unsigned long)segment_not_present },
+ { 12, 0, (__KERNEL_CS|0x3), 0, (unsigned long)stack_segment },
+ { 13, 0, (__KERNEL_CS|0x3), 0, (unsigned long)general_protection },
+ { 14, 0, (__KERNEL_CS|0x3), 0, (unsigned long)page_fault },
+ { 15, 0, (__KERNEL_CS|0x3), 0, (unsigned long)spurious_interrupt_bug },
+ { 16, 0, (__KERNEL_CS|0x3), 0, (unsigned long)coprocessor_error },
+ { 17, 0, (__KERNEL_CS|0x3), 0, (unsigned long)alignment_check },
+#ifdef CONFIG_X86_MCE
+ { 18, 0, (__KERNEL_CS|0x3), 0, (unsigned long)machine_check },
+#endif
+ { 19, 0, (__KERNEL_CS|0x3), 0, (unsigned long)simd_coprocessor_error },
+ { SYSCALL_VECTOR, 3, (__KERNEL_CS|0x3), 0, (unsigned long)system_call },
+ { 0, 0, 0, 0, 0 }
+};
+
+void __init trap_init(void)
+{
+ int ret;
+
+ ret = HYPERVISOR_set_trap_table(trap_table);
+
+ if (ret)
+ printk("HYPERVISOR_set_trap_table faild: error %d\n",
+ ret);
+
+#ifdef CONFIG_IA32_EMULATION
+ set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
+#endif
+
+ /*
+ * Should be a barrier for any external CPU state.
+ */
+ cpu_init();
+}
+
+
+/* Actual parsing is done early in setup.c. */
+static int __init oops_dummy(char *s)
+{
+ panic_on_oops = 1;
+ return -1;
+}
+__setup("oops=", oops_dummy);
+
+static int __init kstack_setup(char *s)
+{
+ kstack_depth_to_print = simple_strtoul(s,NULL,0);
+ return 0;
+}
+__setup("kstack=", kstack_setup);
+
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/vsyscall.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/vsyscall.c
new file mode 100644
index 0000000000..f980cdefff
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/vsyscall.c
@@ -0,0 +1,190 @@
+/*
+ * linux/arch/x86_64/kernel/vsyscall.c
+ *
+ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Copyright 2003 Andi Kleen, SuSE Labs.
+ *
+ * Thanks to hpa@transmeta.com for some useful hints.
+ * Special thanks to Ingo Molnar for his early experience with
+ * a different vsyscall implementation for Linux/IA32 and for the name.
+ *
+ * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
+ * at virtual address -10Mbyte+1024bytes etc... There are at most 8192
+ * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
+ * jumping out of line if necessary.
+ *
+ * Note: the concept clashes with user mode linux. If you use UML just
+ * set the kernel.vsyscall sysctl to 0.
+ */
+
+/*
+ * TODO 2001-03-20:
+ *
+ * 1) make the page fault handler detect faults on page1-page-last of the vsyscall
+ * virtual space, and make it increase %rip and write -ENOSYS in %rax (so
+ * we'll be able to upgrade to a new glibc without upgrading the kernel after
+ * we add more vsyscalls).
+ * 2) Possibly we need a fixmap table for the vsyscalls too if we want
+ * to avoid SIGSEGV and to return -EFAULT from the vsyscalls as well.
+ * Can we segfault inside a "syscall"? We can fix this anytime; those fixes
+ * won't be visible to userspace. Not fixing this is a noop for correct programs;
+ * broken programs will segfault, and there's no security risk until we choose to
+ * fix it.
+ *
+ * None of this is urgent; it only needs to be addressed before shipping the
+ * first production binary kernels.
+ */
+
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/seqlock.h>
+#include <linux/jiffies.h>
+
+#include <asm/vsyscall.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/fixmap.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+#define force_inline __attribute__((always_inline)) inline
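+
+/*
+ * __vsyscall(nr) drops each entry point into its own .vsyscall_<nr>
+ * section; the linker script pins these sections at fixed virtual
+ * addresses at the top of the address space, so user code can call
+ * them directly without entering the kernel.
+ */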
+
+int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
+seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
+
+#include <asm/unistd.h>
+
+static force_inline void timeval_normalize(struct timeval * tv)
+{
+ time_t __sec;
+
+ __sec = tv->tv_usec / 1000000;
+ if (__sec)
+ {
+ tv->tv_usec %= 1000000;
+ tv->tv_sec += __sec;
+ }
+}
+
+static force_inline void do_vgettimeofday(struct timeval * tv)
+{
+ long sequence, t;
+ unsigned long sec, usec;
+
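+ /*
+ * Lockless seqlock read: snapshot the sequence counter, read the
+ * time variables, and retry if a writer raced with us. A TSC (or
+ * HPET) delta since the last tick is added for sub-jiffy resolution.
+ */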
+ do {
+ sequence = read_seqbegin(&__xtime_lock);
+
+ sec = __xtime.tv_sec;
+ usec = (__xtime.tv_nsec / 1000) +
+ (__jiffies - __wall_jiffies) * (1000000 / HZ);
+
+ if (__vxtime.mode == VXTIME_TSC) {
+ sync_core();
+ rdtscll(t);
+ if (t < __vxtime.last_tsc) t = __vxtime.last_tsc;
+ usec += ((t - __vxtime.last_tsc) *
+ __vxtime.tsc_quot) >> 32;
+ /* See comment in x86_64 do_gettimeofday. */
+ } else {
+ usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
+ __vxtime.last) * __vxtime.quot) >> 32;
+ }
+ } while (read_seqretry(&__xtime_lock, sequence));
+
+ tv->tv_sec = sec + usec / 1000000;
+ tv->tv_usec = usec % 1000000;
+}
+
+/* RED-PEN may want to re-add seq locking, but then the variable should be write-once. */
+static force_inline void do_get_tz(struct timezone * tz)
+{
+ *tz = __sys_tz;
+}
+
+
+static force_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ int ret;
+ asm volatile("syscall"
+ : "=a" (ret)
+ : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
+ return ret;
+}
+
+static force_inline long time_syscall(long *t)
+{
+ long secs;
+ asm volatile("syscall"
+ : "=a" (secs)
+ : "0" (__NR_time),"D" (t) : __syscall_clobber);
+ return secs;
+}
+
+static int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
+{
+ if (unlikely(!__sysctl_vsyscall))
+ return gettimeofday(tv,tz);
+ if (tv)
+ do_vgettimeofday(tv);
+ if (tz)
+ do_get_tz(tz);
+ return 0;
+}
+
+/* This will break when the xtime seconds get inaccurate, but that is
+ * unlikely */
+static time_t __vsyscall(1) vtime(time_t *t)
+{
+ if (unlikely(!__sysctl_vsyscall))
+ return time_syscall(t);
+ else if (t)
+ *t = __xtime.tv_sec;
+ return __xtime.tv_sec;
+}
+
+static long __vsyscall(2) venosys_0(void)
+{
+ return -ENOSYS;
+}
+
+static long __vsyscall(3) venosys_1(void)
+{
+ return -ENOSYS;
+}
+
+static void __init map_vsyscall(void)
+{
+ extern char __vsyscall_0;
+ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+
+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+}
+
+extern void __set_fixmap_user (enum fixed_addresses, unsigned long, pgprot_t);
+
+static void __init map_vsyscall_user(void)
+{
+ extern char __vsyscall_0;
+ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+
+ __set_fixmap_user(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+}
+
+static int __init vsyscall_init(void)
+{
+ BUG_ON(((unsigned long) &vgettimeofday !=
+ VSYSCALL_ADDR(__NR_vgettimeofday)));
+ BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
+ BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
+ map_vsyscall();
+ map_vsyscall_user(); /* establish translation for the user address space */
+ sysctl_vsyscall = 0; /* TBD */
+
+ return 0;
+}
+
+__initcall(vsyscall_init);
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c
new file mode 100644
index 0000000000..8ec2c0fdae
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c
@@ -0,0 +1,223 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/apm_bios.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/tty.h>
+#include <linux/ioctl32.h>
+
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/mmx.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/nmi.h>
+#include <asm/kdebug.h>
+#include <asm/unistd.h>
+#include <asm/tlbflush.h>
+
+extern spinlock_t rtc_lock;
+
+#ifdef CONFIG_SMP
+extern void __write_lock_failed(rwlock_t *rw);
+extern void __read_lock_failed(rwlock_t *rw);
+#endif
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+extern struct drive_info_struct drive_info;
+EXPORT_SYMBOL(drive_info);
+#endif
+
+extern unsigned long get_cmos_time(void);
+
+/* platform dependent support */
+EXPORT_SYMBOL(boot_cpu_data);
+//EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(disable_irq_nosync);
+EXPORT_SYMBOL(probe_irq_mask);
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(pm_idle);
+// EXPORT_SYMBOL(pm_power_off);
+EXPORT_SYMBOL(get_cmos_time);
+
+EXPORT_SYMBOL(__down_failed);
+EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__down_failed_trylock);
+EXPORT_SYMBOL(__up_wakeup);
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(ip_compute_csum);
+/* Delay loops */
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+EXPORT_SYMBOL(__put_user_1);
+EXPORT_SYMBOL(__put_user_2);
+EXPORT_SYMBOL(__put_user_4);
+EXPORT_SYMBOL(__put_user_8);
+
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+
+EXPORT_SYMBOL(strncpy_from_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(copy_user_generic);
+EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(copy_to_user);
+EXPORT_SYMBOL(copy_in_user);
+EXPORT_SYMBOL(strnlen_user);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+#endif
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pcibios_penalize_isa_irq);
+EXPORT_SYMBOL(pci_mem_start);
+#endif
+
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);
+
+EXPORT_SYMBOL(cpu_pda);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(cpu_data);
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(__write_lock_failed);
+EXPORT_SYMBOL(__read_lock_failed);
+
+EXPORT_SYMBOL(synchronize_irq);
+EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(cpu_callout_map);
+#endif
+
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
+
+EXPORT_SYMBOL(get_wchan);
+
+EXPORT_SYMBOL(rtc_lock);
+
+/* EXPORT_SYMBOL_GPL(set_nmi_callback);
+ EXPORT_SYMBOL_GPL(unset_nmi_callback); */
+
+/* Export string functions. We normally rely on gcc builtins for most of these,
+   but gcc sometimes decides not to inline them. */
+#undef memcpy
+#undef memset
+#undef memmove
+#undef memchr
+#undef strlen
+#undef strcpy
+#undef strncmp
+#undef strncpy
+#undef strchr
+#undef strcmp
+#undef strcat
+#undef memcmp
+
+extern void * memset(void *,int,__kernel_size_t);
+extern size_t strlen(const char *);
+extern void * memmove(void * dest,const void *src,size_t count);
+extern char * strcpy(char * dest,const char *src);
+extern int strcmp(const char * cs,const char * ct);
+extern void *memchr(const void *s, int c, size_t n);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+extern void * __memcpy(void *,const void *,__kernel_size_t);
+extern char * strcat(char *, const char *);
+extern int memcmp(const void * cs,const void * ct,size_t count);
+
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(memcmp);
+
+#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
+/* prototypes are wrong, these are assembly with custom calling conventions */
+extern void rwsem_down_read_failed_thunk(void);
+extern void rwsem_wake_thunk(void);
+extern void rwsem_downgrade_thunk(void);
+extern void rwsem_down_write_failed_thunk(void);
+EXPORT_SYMBOL(rwsem_down_read_failed_thunk);
+EXPORT_SYMBOL(rwsem_wake_thunk);
+EXPORT_SYMBOL(rwsem_downgrade_thunk);
+EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
+#endif
+
+EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_HAVE_DEC_LOCK
+EXPORT_SYMBOL(_atomic_dec_and_lock);
+#endif
+
+EXPORT_SYMBOL(die_chain);
+EXPORT_SYMBOL(register_die_notifier);
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_SYMBOL(smp_num_siblings);
+#endif
+
+extern void do_softirq_thunk(void);
+EXPORT_SYMBOL(do_softirq_thunk);
+
+void out_of_line_bug(void);
+EXPORT_SYMBOL(out_of_line_bug);
+
+EXPORT_SYMBOL(init_level4_pgt);
+
+extern unsigned long __supported_pte_mask;
+EXPORT_SYMBOL(__supported_pte_mask);
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL_GPL(flush_tlb_all);
+#endif
+
+EXPORT_SYMBOL(cpu_khz);
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/Makefile b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/Makefile
new file mode 100644
index 0000000000..9b0223649f
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/Makefile
@@ -0,0 +1,26 @@
+#
+# Makefile for the linux x86_64-specific parts of the memory manager.
+#
+
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/mm
+
+obj-y := init.o fault.o ioremap.o pageattr.o hypervisor.o
+c-obj-y := extable.o
+
+#obj-y := init.o fault.o ioremap.o extable.o pageattr.o
+#c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+c-obj-$(CONFIG_DISCONTIGMEM) += numa.o
+c-obj-$(CONFIG_K8_NUMA) += k8topology.o
+
+hugetlbpage-y = ../../../i386/mm/hugetlbpage.o
+
+c-link :=
+
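+# The c-obj-y sources live in arch/x86_64/mm; the rule below symlinks
+# each one into this directory at build time so it is compiled here with
+# the Xen-specific CFLAGS set above.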
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/x86_64/mm/$(notdir $@) $@
+
+obj-y += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/fault.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/fault.c
new file mode 100644
index 0000000000..636cd1873f
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/fault.c
@@ -0,0 +1,591 @@
+/*
+ * linux/arch/x86-64/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h> /* For unblank_screen() */
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/kprobes.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/proto.h>
+#include <asm/kdebug.h>
+#include <asm-generic/sections.h>
+#include <asm/kdebug.h>
+
+DEFINE_PER_CPU(pgd_t *, cur_pgd);
+
+void bust_spinlocks(int yes)
+{
+ int loglevel_save = console_loglevel;
+ if (yes) {
+ oops_in_progress = 1;
+ } else {
+#ifdef CONFIG_VT
+ unblank_screen();
+#endif
+ oops_in_progress = 0;
+ /*
+ * OK, the message is on the console. Now we call printk()
+ * without oops_in_progress set so that printk will give klogd
+ * a poke. Hold onto your hats...
+ */
+ console_loglevel = 15; /* NMI oopser may have shut the console up */
+ printk(" ");
+ console_loglevel = loglevel_save;
+ }
+}
+
+/* Sometimes the CPU reports invalid exceptions on prefetch.
+ Check that here and ignore.
+ Opcode checker based on code by Richard Brunner */
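+/* For example, AMD's prefetch instructions are encoded 0x0F 0x0D (3DNow!)
+   and 0x0F 0x18 (SSE); the scan below skips over legal prefixes until it
+   finds, or rules out, one of those opcodes. */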
+static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+ unsigned long error_code)
+{
+ unsigned char *instr = (unsigned char *)(regs->rip);
+ int scan_more = 1;
+ int prefetch = 0;
+ unsigned char *max_instr = instr + 15;
+
+	/* If it was an exec fault, ignore it */
+ if (error_code & (1<<4))
+ return 0;
+
+	/* Code segments in LDT could have a non-zero base. Don't check
+	   when that's possible */
+ if (regs->cs & (1<<2))
+ return 0;
+
+ if ((regs->cs & 3) != 0 && regs->rip >= TASK_SIZE)
+ return 0;
+
+ while (scan_more && instr < max_instr) {
+ unsigned char opcode;
+ unsigned char instr_hi;
+ unsigned char instr_lo;
+
+ if (__get_user(opcode, instr))
+ break;
+
+ instr_hi = opcode & 0xf0;
+ instr_lo = opcode & 0x0f;
+ instr++;
+
+ switch (instr_hi) {
+ case 0x20:
+ case 0x30:
+ /* Values 0x26,0x2E,0x36,0x3E are valid x86
+ prefixes. In long mode, the CPU will signal
+ invalid opcode if some of these prefixes are
+ present so we will never get here anyway */
+ scan_more = ((instr_lo & 7) == 0x6);
+ break;
+
+ case 0x40:
+		/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes.
+		   Need to figure out under what instruction mode the
+		   instruction was issued ... */
+ /* Could check the LDT for lm, but for now it's good
+ enough to assume that long mode only uses well known
+ segments or kernel. */
+ scan_more = ((regs->cs & 3) == 0) || (regs->cs == __USER_CS);
+ break;
+
+ case 0x60:
+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
+ scan_more = (instr_lo & 0xC) == 0x4;
+ break;
+ case 0xF0:
+ /* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
+ scan_more = !instr_lo || (instr_lo>>1) == 1;
+ break;
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+ scan_more = 0;
+ if (__get_user(opcode, instr))
+ break;
+ prefetch = (instr_lo == 0xF) &&
+ (opcode == 0x0D || opcode == 0x18);
+ break;
+ default:
+ scan_more = 0;
+ break;
+ }
+ }
+ return prefetch;
+}
+
+static int bad_address(void *p)
+{
+ unsigned long dummy;
+ return __get_user(dummy, (unsigned long *)p);
+}
+
+void dump_pagetable(unsigned long address)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
+ pgd += pgd_index(address);
+
+ printk("PGD %lx ", pgd_val(*pgd));
+ if (bad_address(pgd)) goto bad;
+ if (!pgd_present(*pgd)) goto ret;
+
+ pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
+ if (bad_address(pud)) goto bad;
+ printk("PUD %lx ", pud_val(*pud));
+ if (!pud_present(*pud)) goto ret;
+
+ pmd = pmd_offset(pud, address);
+ if (bad_address(pmd)) goto bad;
+ printk("PMD %lx ", pmd_val(*pmd));
+ if (!pmd_present(*pmd)) goto ret;
+
+ pte = pte_offset_kernel(pmd, address);
+ if (bad_address(pte)) goto bad;
+ printk("PTE %lx", pte_val(*pte));
+ret:
+ printk("\n");
+ return;
+bad:
+ printk("BAD\n");
+}
+
+static const char errata93_warning[] =
+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
+KERN_ERR "******* Please consider a BIOS update.\n"
+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
+
+/* Workaround for K8 erratum #93 & buggy BIOS.
+   BIOS SMM functions are required to use a specific workaround
+   to avoid corruption of the 64bit RIP register on C stepping K8.
+   Many BIOSes that weren't tested properly miss this.
+   The OS sees this as a page fault with the upper 32 bits of RIP cleared.
+   Try to work around it here.
+   Note we only handle faults in the kernel here. */
+
+static int is_errata93(struct pt_regs *regs, unsigned long address)
+{
+ static int warned;
+ if (address != regs->rip)
+ return 0;
+ if ((address >> 32) != 0)
+ return 0;
+ address |= 0xffffffffUL << 32;
+ if ((address >= (u64)_stext && address <= (u64)_etext) ||
+ (address >= MODULES_VADDR && address <= MODULES_END)) {
+ if (!warned) {
+ printk(errata93_warning);
+ warned = 1;
+ }
+ regs->rip = address;
+ return 1;
+ }
+ return 0;
+}
+
+int unhandled_signal(struct task_struct *tsk, int sig)
+{
+ if (tsk->pid == 1)
+ return 1;
+ /* Warn for strace, but not for gdb */
+ if (!test_ti_thread_flag(tsk->thread_info, TIF_SYSCALL_TRACE) &&
+ (tsk->ptrace & PT_PTRACED))
+ return 0;
+ return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
+ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
+}
+
+static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
+ unsigned long error_code)
+{
+ oops_begin();
+ printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
+ current->comm, address);
+ dump_pagetable(address);
+ __die("Bad pagetable", regs, error_code);
+ oops_end();
+ do_exit(SIGKILL);
+}
+
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ */
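+/*
+ * The kernel PGD entries covering this area are propagated to each mm
+ * lazily, so a task's pgd may lack an entry that init_mm already has.
+ * Copying that single entry is enough, since the lower-level tables
+ * are shared with the reference page tables.
+ */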
+static int vmalloc_fault(unsigned long address)
+{
+ pgd_t *pgd, *pgd_ref;
+ pud_t *pud, *pud_ref;
+ pmd_t *pmd, *pmd_ref;
+ pte_t *pte, *pte_ref;
+
+	/* Copy kernel mappings over when needed. This can also
+	   happen within a race in a page table update. In the latter
+	   case just flush. */
+
+ pgd = pgd_offset(current->mm ?: &init_mm, address);
+ pgd_ref = pgd_offset_k(address);
+ if (pgd_none(*pgd_ref))
+ return -1;
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+
+ /* Below here mismatches are bugs because these lower tables
+ are shared */
+
+ pud = pud_offset(pgd, address);
+ pud_ref = pud_offset(pgd_ref, address);
+ if (pud_none(*pud_ref))
+ return -1;
+ if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
+ BUG();
+ pmd = pmd_offset(pud, address);
+ pmd_ref = pmd_offset(pud_ref, address);
+ if (pmd_none(*pmd_ref))
+ return -1;
+ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+ BUG();
+ pte_ref = pte_offset_kernel(pmd_ref, address);
+ if (!pte_present(*pte_ref))
+ return -1;
+ pte = pte_offset_kernel(pmd, address);
+ if (!pte_present(*pte) || pte_page(*pte) != pte_page(*pte_ref))
+ BUG();
+ __flush_tlb_all();
+ return 0;
+}
+
+int page_fault_trace = 0;
+int exception_trace = 1;
+
+
+#define MEM_VERBOSE 1
+
+#ifdef MEM_VERBOSE
+#define MEM_LOG(_f, _a...) \
+ printk("fault.c:[%d]-> " _f "\n", \
+ __LINE__ , ## _a )
+#else
+#define MEM_LOG(_f, _a...) ((void)0)
+#endif
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * error_code:
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ * bit 2 == 0 means kernel, 1 means user-mode
+ * bit 3 == 1 means fault was an instruction fetch
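+ *
+ * For example, error_code == 6 (binary 110) is a user-mode write to a
+ * not-present page -- the common copy-on-write case.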
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ const struct exception_table_entry *fixup;
+ int write;
+ siginfo_t info;
+
+ if (!user_mode(regs))
+ error_code &= ~4; /* means kernel */
+
+#ifdef CONFIG_CHECKING
+ {
+ unsigned long gs;
+ struct x8664_pda *pda = cpu_pda + stack_smp_processor_id();
+ rdmsrl(MSR_GS_BASE, gs);
+ if (gs != (unsigned long)pda) {
+ wrmsrl(MSR_GS_BASE, pda);
+ printk("page_fault: wrong gs %lx expected %p\n", gs, pda);
+ }
+ }
+#endif
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+
+ if (likely(regs->eflags & X86_EFLAGS_IF))
+ local_irq_enable();
+
+ if (unlikely(page_fault_trace))
+ printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
+ regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
+
+ tsk = current;
+ mm = tsk->mm;
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * This verifies that the fault happens in kernel space
+ * (error_code & 4) == 0, and that the fault was not a
+ * protection error (error_code & 1) == 0.
+ */
+ if (unlikely(address >= TASK_SIZE)) {
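+		/*
+		 * (error_code & 5) == 0: both the protection bit (0) and
+		 * the user bit (2) are clear, i.e. a kernel-mode fault on
+		 * a not-present page -- the only case vmalloc_fault()
+		 * can fix up.
+		 */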
+ if (!(error_code & 5)) {
+ if (vmalloc_fault(address) < 0)
+ goto bad_area_nosemaphore;
+ return;
+ }
+ /*
+ * Don't take the mm semaphore here. If we fixup a prefetch
+ * fault we could otherwise deadlock.
+ */
+ goto bad_area_nosemaphore;
+ }
+
+ if (unlikely(error_code & (1 << 3)))
+ pgtable_bad(address, regs, error_code);
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (unlikely(in_atomic() || !mm))
+ goto bad_area_nosemaphore;
+
+ again:
+	/* When running in the kernel we expect faults to occur only to
+	 * addresses in user space. All other faults represent errors in the
+	 * kernel and should generate an OOPS. Unfortunately, in the case of an
+	 * erroneous fault occurring in a code path which already holds mmap_sem
+	 * we will deadlock attempting to validate the fault against the
+	 * address space. Luckily the kernel only validly references user
+	 * space from well-defined areas of code, which are listed in the
+	 * exception tables.
+	 *
+	 * As the vast majority of faults will be valid we will only perform
+	 * the source reference check when there is a possibility of a deadlock.
+	 * Attempt to lock the address space; if we cannot, we then validate the
+	 * source. If this is invalid we can skip the address space check,
+	 * thus avoiding the deadlock.
+	 */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if ((error_code & 4) == 0 &&
+ !search_exception_tables(regs->rip))
+ goto bad_area_nosemaphore;
+ down_read(&mm->mmap_sem);
+ }
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (likely(vma->vm_start <= address))
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (error_code & 4) {
+ // XXX: align red zone size with ABI
+ if (address + 128 < regs->rsp)
+ goto bad_area;
+ }
+ if (expand_stack(vma, address))
+ goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (error_code & 3) {
+ default: /* 3: write, present */
+ /* fall through */
+ case 2: /* write, not present */
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ write++;
+ break;
+ case 1: /* read, present */
+ goto bad_area;
+ case 0: /* read, not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ switch (handle_mm_fault(mm, vma, address, write)) {
+ case 1:
+ tsk->min_flt++;
+ break;
+ case 2:
+ tsk->maj_flt++;
+ break;
+ case 0:
+ goto do_sigbus;
+ default:
+ goto out_of_memory;
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+
+#ifdef CONFIG_IA32_EMULATION
+ /* 32bit vsyscall. map on demand. */
+ if (test_thread_flag(TIF_IA32) &&
+ address >= VSYSCALL32_BASE && address < VSYSCALL32_END) {
+ if (map_syscall32(mm, address) < 0)
+ goto out_of_memory2;
+ return;
+ }
+#endif
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & 4) {
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+		/* Work around K8 erratum #100: K8 in compat mode
+		   occasionally jumps to illegal addresses >4GB. We
+		   catch this here in the page fault handler because
+		   these addresses are not reachable. Just detect this
+		   case and return. Any code segment in LDT is
+		   compatibility mode. */
+ if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
+ (address >> 32))
+ return;
+
+ if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
+ printk(
+ "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
+ tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
+ tsk->comm, tsk->pid, address, regs->rip,
+ regs->rsp, error_code);
+ }
+
+ tsk->thread.cr2 = address;
+ /* Kernel addresses are always protection faults */
+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+
+ /* Are we prepared to handle this kernel fault? */
+ fixup = search_exception_tables(regs->rip);
+ if (fixup) {
+ regs->rip = fixup->fixup;
+ return;
+ }
+
+ /*
+ * Hall of shame of CPU/BIOS bugs.
+ */
+
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+ if (is_errata93(regs, address))
+ return;
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ oops_begin();
+
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at %016lx RIP: \n" KERN_ALERT,address);
+ printk_address(regs->rip);
+ printk("\n");
+ dump_pagetable(address);
+ __die("Oops", regs, error_code);
+ /* Executive summary in case the body of the oops scrolled away */
+ printk(KERN_EMERG "CR2: %016lx\n", address);
+ oops_end();
+ do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (current->pid == 1) {
+ yield();
+ goto again;
+ }
+ printk("VM: killing process %s\n", tsk->comm);
+ if (error_code & 4)
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!(error_code & 4))
+ goto no_context;
+
+ tsk->thread.cr2 = address;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+ return;
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c
new file mode 100644
index 0000000000..83a922954c
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c
@@ -0,0 +1,275 @@
+/******************************************************************************
+ * mm/hypervisor.c
+ *
+ * Update page tables via the hypervisor.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+/*
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Added hypercalls for x86-64.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/balloon.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include <linux/percpu.h>
+#endif
+
+void xen_l1_entry_update(pte_t *ptr, unsigned long val)
+{
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+ u.val = val;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+}
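+
+/*
+ * HYPERVISOR_mmu_update() takes an array plus a count, so several
+ * updates can be batched into one hypercall; a minimal sketch
+ * (ptr0/val0 and ptr1/val1 are illustrative):
+ *
+ *	mmu_update_t u[2];
+ *	u[0].ptr = virt_to_machine(ptr0); u[0].val = val0;
+ *	u[1].ptr = virt_to_machine(ptr1); u[1].val = val1;
+ *	BUG_ON(HYPERVISOR_mmu_update(u, 2, NULL, DOMID_SELF) < 0);
+ */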
+
+void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
+{
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+ u.val = val.pmd;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_l3_entry_update(pud_t *ptr, pud_t val)
+{
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+ u.val = val.pud;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
+{
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+ u.val = val.pgd;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pt_switch(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_NEW_BASEPTR;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_new_user_pt(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_NEW_USER_BASEPTR;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_tlb_flush(void)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_invlpg(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_INVLPG_LOCAL;
+ op.linear_addr = ptr & PAGE_MASK;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+#ifdef CONFIG_SMP
+void xen_tlb_flush_all(void)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_TLB_FLUSH_ALL;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_tlb_flush_mask(cpumask_t *mask)
+{
+ struct mmuext_op op;
+ if ( cpus_empty(*mask) )
+ return;
+ op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+ op.cpuset = mask->bits;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_invlpg_all(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_INVLPG_ALL;
+ op.linear_addr = ptr & PAGE_MASK;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
+{
+ struct mmuext_op op;
+ if ( cpus_empty(*mask) )
+ return;
+ op.cmd = MMUEXT_INVLPG_MULTI;
+ op.cpuset = mask->bits;
+ op.linear_addr = ptr & PAGE_MASK;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+#endif
+
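+/*
+ * Pinning registers a page with Xen as a page table of the given level;
+ * the hypervisor validates its entries once and from then on requires
+ * the guest to map it read-only. Unpinning releases that state.
+ */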
+void xen_pgd_pin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_PIN_L4_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pgd_unpin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_UNPIN_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pud_pin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_PIN_L3_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pud_unpin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_UNPIN_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pmd_pin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_PIN_L2_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pmd_unpin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_UNPIN_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pte_pin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_PIN_L1_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pte_unpin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_UNPIN_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_set_ldt(unsigned long ptr, unsigned long len)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_SET_LDT;
+ op.linear_addr = ptr;
+ op.nr_ents = len;
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
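+/*
+ * Update one entry in the machine-to-physical (M2P) table; the
+ * MMU_MACHPHYS_UPDATE tag in the low bits of ptr selects this
+ * operation instead of a page-table write.
+ */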
+void xen_machphys_update(unsigned long mfn, unsigned long pfn)
+{
+ mmu_update_t u;
+ u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ u.val = pfn;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+}
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
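+/*
+ * Carve a physically contiguous region out of the lowmem direct map:
+ * unmap each page, invalidate its P2M entry, and return the underlying
+ * machine frames to Xen via the balloon interface.
+ */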
+unsigned long allocate_empty_lowmem_region(unsigned long pages)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long *pfn_array;
+ unsigned long vstart;
+ unsigned long i;
+ unsigned int order = get_order(pages*PAGE_SIZE);
+
+ vstart = __get_free_pages(GFP_KERNEL, order);
+ if ( vstart == 0 )
+ return 0UL;
+
+ scrub_pages(vstart, 1 << order);
+
+ pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
+ if ( pfn_array == NULL )
+ BUG();
+
+ for ( i = 0; i < (1<<order); i++ )
+ {
+ pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ pfn_array[i] = pte->pte >> PAGE_SHIFT;
+ xen_l1_entry_update(pte, 0);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+ (u32)INVALID_P2M_ENTRY;
+ }
+
+ /* Flush updates through and flush the TLB. */
+ flush_tlb_all();
+
+ balloon_put_pages(pfn_array, 1 << order);
+
+ vfree(pfn_array);
+
+ return vstart;
+}
+
+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c
new file mode 100644
index 0000000000..c43d567c5b
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c
@@ -0,0 +1,935 @@
+/*
+ * linux/arch/x86_64/mm/init.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
+ *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen.
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/proc_fs.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/dma.h>
+#include <asm/fixmap.h>
+#include <asm/e820.h>
+#include <asm/apic.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/proto.h>
+#include <asm/smp.h>
+
+#ifndef Dprintk
+#define Dprintk(x...)
+#endif
+
+#ifdef CONFIG_GART_IOMMU
+extern int swiotlb;
+#endif
+
+extern char _stext[];
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+extern unsigned long start_pfn;
+
+static int init_mapping_done;
+
+/*
+ * Use this until the direct mapping is established, i.e. before __va() is
+ * available in init_memory_mapping().
+ */
+
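+/*
+ * Follow one level of the boot page tables: mask off the flag bits,
+ * translate the machine frame to a pseudo-physical frame, and address
+ * it through the __START_KERNEL_map alias.
+ */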
+#define addr_to_page(addr, page) \
+ (addr) &= PHYSICAL_PAGE_MASK; \
+ (page) = ((unsigned long *) ((unsigned long)(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + __START_KERNEL_map)))
+
+static void __make_page_readonly(unsigned long va)
+{
+ unsigned long addr;
+ pte_t *pte;
+ unsigned long *page = (unsigned long *) init_level4_pgt;
+
+ addr = (unsigned long) page[pgd_index(va)];
+ addr_to_page(addr, page);
+
+ addr = page[pud_index(va)];
+ addr_to_page(addr, page);
+
+ addr = page[pmd_index(va)];
+ addr_to_page(addr, page);
+
+ pte = (pte_t *) &page[pte_index(va)];
+ xen_l1_entry_update(pte, (*(unsigned long*)pte) & ~_PAGE_RW);
+ __flush_tlb_one(addr);
+}
+
+static void __make_page_writable(unsigned long va)
+{
+ unsigned long addr;
+ pte_t *pte;
+ unsigned long *page = (unsigned long *) init_level4_pgt;
+
+ addr = (unsigned long) page[pgd_index(va)];
+ addr_to_page(addr, page);
+
+ addr = page[pud_index(va)];
+ addr_to_page(addr, page);
+
+ addr = page[pmd_index(va)];
+ addr_to_page(addr, page);
+
+ pte = (pte_t *) &page[pte_index(va)];
+ xen_l1_entry_update(pte, (*(unsigned long*)pte)| _PAGE_RW);
+ __flush_tlb_one(addr);
+}
+
+/*
+ * Assume the translation is already established.
+ */
+void make_page_readonly(void *va)
+{
+ pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t *pte;
+ unsigned long addr = (unsigned long) va;
+
+ if (!init_mapping_done) {
+ __make_page_readonly(addr);
+ return;
+ }
+
+ pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
+ pte = pte_offset_kernel(pmd, addr);
+ xen_l1_entry_update(pte, (*(unsigned long*)pte)&~_PAGE_RW);
+ __flush_tlb_one(addr);
+}
+
+void make_page_writable(void *va)
+{
+ pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t *pte;
+ unsigned long addr = (unsigned long) va;
+
+ if (!init_mapping_done) {
+ __make_page_writable(addr);
+ return;
+ }
+
+ pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
+ pte = pte_offset_kernel(pmd, addr);
+ xen_l1_entry_update(pte, (*(unsigned long*)pte)|_PAGE_RW);
+ __flush_tlb_one(addr);
+}
+
+void make_pages_readonly(void* va, unsigned nr)
+{
+ while ( nr-- != 0 ) {
+ make_page_readonly(va);
+ va = (void*)((unsigned long)va + PAGE_SIZE);
+ }
+}
+
+void make_pages_writable(void* va, unsigned nr)
+{
+ while ( nr-- != 0 ) {
+ make_page_writable(va);
+ va = (void*)((unsigned long)va + PAGE_SIZE);
+ }
+}
+
+/*
+ * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
+ * in physical space so we can cache the place of the first one and move
+ * around without checking the pgd every time.
+ */
+
+void show_mem(void)
+{
+ int i, total = 0, reserved = 0;
+ int shared = 0, cached = 0;
+ pg_data_t *pgdat;
+ struct page *page;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+
+ for_each_pgdat(pgdat) {
+ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
+ page = pfn_to_page(pgdat->node_start_pfn + i);
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (page_count(page))
+ shared += page_count(page) - 1;
+ }
+ }
+ printk("%d pages of RAM\n", total);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
+}
+
+/* References to section boundaries */
+
+extern char _text, _etext, _edata, __bss_start, _end[];
+extern char __init_begin, __init_end;
+
+int after_bootmem;
+
+static void *spp_getpage(void)
+{
+ void *ptr;
+ if (after_bootmem)
+ ptr = (void *) get_zeroed_page(GFP_ATOMIC);
+ else
+ ptr = alloc_bootmem_pages(PAGE_SIZE);
+ if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
+ panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
+
+ Dprintk("spp_getpage %p\n", ptr);
+ return ptr;
+}
+
+#define pgd_offset_u(address) (pgd_t *)(init_level4_user_pgt + pgd_index(address))
+
+static inline pud_t *pud_offset_u(unsigned long address)
+{
+ pud_t *pud = level3_user_pgt;
+
+ return pud + pud_index(address);
+}
+
+static void set_pte_phys(unsigned long vaddr,
+ unsigned long phys, pgprot_t prot, int user_mode)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte, new_pte;
+
+ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
+
+ pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
+
+ if (pgd_none(*pgd)) {
+ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
+ return;
+ }
+
+ pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
+
+ if (pud_none(*pud)) {
+ pmd = (pmd_t *) spp_getpage();
+
+ make_page_readonly(pmd);
+ xen_pmd_pin(__pa(pmd));
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+ if (pmd != pmd_offset(pud, 0)) {
+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
+ return;
+ }
+ }
+
+ pmd = pmd_offset(pud, vaddr);
+
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) spp_getpage();
+ make_page_readonly(pte);
+
+ xen_pte_pin(__pa(pte));
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
+ if (pte != pte_offset_kernel(pmd, 0)) {
+ printk("PAGETABLE BUG #02!\n");
+ return;
+ }
+ }
+ new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
+
+ pte = pte_offset_kernel(pmd, vaddr);
+
+ if (!pte_none(*pte) &&
+ pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
+ pte_ERROR(*pte);
+ xen_l1_entry_update(pte, new_pte.pte);
+
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+static void set_pte_phys_ma(unsigned long vaddr,
+ unsigned long phys, pgprot_t prot)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte, new_pte;
+
+ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
+
+ pgd = pgd_offset_k(vaddr);
+ if (pgd_none(*pgd)) {
+ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
+ return;
+ }
+ pud = pud_offset(pgd, vaddr);
+ if (pud_none(*pud)) {
+
+ pmd = (pmd_t *) spp_getpage();
+ make_page_readonly(pmd);
+ xen_pmd_pin(__pa(pmd));
+
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+
+ if (pmd != pmd_offset(pud, 0)) {
+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
+ return;
+ }
+ }
+ pmd = pmd_offset(pud, vaddr);
+
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) spp_getpage();
+ make_page_readonly(pte);
+ xen_pte_pin(__pa(pte));
+
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
+ if (pte != pte_offset_kernel(pmd, 0)) {
+ printk("PAGETABLE BUG #02!\n");
+ return;
+ }
+ }
+
+ new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
+ pte = pte_offset_kernel(pmd, vaddr);
+
+ if (!pte_none(*pte) &&
+ pte_val_ma(*pte) != (pte_val_ma(new_pte) & __supported_pte_mask))
+ pte_ERROR(*pte);
+
+ /*
+ * Note that the pte page is already RO, thus we want to use
+ * xen_l1_entry_update(), not set_pte().
+ */
+ xen_l1_entry_update(pte,
+ (pfn_pte_ma(phys >> PAGE_SHIFT, prot).pte));
+
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+#define SET_FIXMAP_KERNEL 0
+#define SET_FIXMAP_USER 1
+
+/* NOTE: this is meant to be run only at boot */
+void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ printk("Invalid __set_fixmap\n");
+ return;
+ }
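+	/*
+	 * The vsyscall page is ordinary RAM, named by its pseudo-physical
+	 * address; everything else (e.g. the shared-info frame) is named
+	 * by machine address, hence the _ma variant below.
+	 */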
+ switch (idx) {
+ case VSYSCALL_FIRST_PAGE:
+ set_pte_phys(address, phys, prot, SET_FIXMAP_KERNEL);
+ break;
+ default:
+ set_pte_phys_ma(address, phys, prot);
+ break;
+ }
+}
+
+
+/*
+ * At this point it only supports vsyscall area.
+ */
+void __set_fixmap_user (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ printk("Invalid __set_fixmap\n");
+ return;
+ }
+
+ set_pte_phys(address, phys, prot, SET_FIXMAP_USER);
+
+#if 0
+ page = (unsigned long *) user_level3_pgt;
+ pud = page[pud_index(address)];
+
+ printk("pud = %p\n", pud);
+
+ pmd = (pmd_t *) spp_getpage();
+ printk("alloc pmd = %p\n", pmd);
+
+ make_page_readonly((unsigned long)pmd);
+
+ xen_pmd_pin(__pa(pmd));
+
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+
+ printk("after set_pud\n");
+
+ pte = (pte_t *) spp_getpage();
+	printk("pte = %p\n", pte);
+
+ make_page_readonly((unsigned long)pte);
+
+ xen_pte_pin(__pa(pte));
+
+ page = (unsigned long *) pud;
+ pud = page[pud_index(address)];
+
+
+ pmd = pmd_offset(pud, vaddr);
+
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
+#endif
+
+}
+
+unsigned long __initdata table_start, table_end, tables_reserved;
+
+#if 0
+/*
+ * Get the machine PFN given va
+ */
+static unsigned long get_machine_pfn(unsigned long va)
+{
+ unsigned long addr;
+ pte_t *pte;
+
+ unsigned long *page = (unsigned long *) init_level4_pgt;
+
+ addr = (unsigned long) page[pgd_index(va)];
+ addr &= PHYSICAL_PAGE_MASK;
+ page = (unsigned long *) ((unsigned long)(((mfn_to_pfn(addr >> PAGE_SHIFT)) << PAGE_SHIFT) + __START_KERNEL_map));
+
+ addr = page[pud_index(va)];
+ addr &= PHYSICAL_PAGE_MASK;
+ page = (unsigned long *) ((unsigned long)(((mfn_to_pfn(addr >> PAGE_SHIFT)) << PAGE_SHIFT) + __START_KERNEL_map));
+
+ addr = page[pmd_index(va)];
+ addr &= PHYSICAL_PAGE_MASK;
+ page = (unsigned long *) ((unsigned long)(((mfn_to_pfn(addr >> PAGE_SHIFT)) << PAGE_SHIFT)+ __START_KERNEL_map));
+
+ pte = (pte_t *) &page[pte_index(va)];
+
+ return (unsigned long) (pte->pte >> PAGE_SHIFT);
+}
+#endif
+
+unsigned long get_machine_pfn(unsigned long addr)
+{
+ pud_t* pud = pud_offset_k(addr);
+ pmd_t* pmd = pmd_offset(pud, addr);
+ pte_t *pte = pte_offset_kernel(pmd, addr);
+
+ return (pte->pte >> PAGE_SHIFT);
+}
+
+
+/*
+ * We start using from start_pfn
+ */
+static __init void *alloc_low_page(unsigned long *phys)
+{
+ unsigned long pfn = table_end++;
+
+ *phys = (pfn << PAGE_SHIFT);
+ memset((void *) ((pfn << PAGE_SHIFT) + __START_KERNEL_map), 0, PAGE_SIZE);
+
+ return (void *)((pfn << PAGE_SHIFT) + __START_KERNEL_map);
+}
+
+#define PTE_SIZE PAGE_SIZE
+
+static inline void __set_pte(pte_t *dst, pte_t val)
+{
+ *dst = val;
+}
+
+void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+{
+ long i, j, k;
+ unsigned long paddr;
+
+ i = pud_index(address);
+ pud = pud + i;
+
+ for (; i < PTRS_PER_PUD; pud++, i++) {
+ unsigned long pmd_phys;
+ pmd_t *pmd;
+
+ paddr = address + i*PUD_SIZE;
+ if (paddr >= end) {
+ for (; i < PTRS_PER_PUD; i++, pud++)
+ set_pud(pud, __pud(0));
+ break;
+ }
+
+ pmd = alloc_low_page(&pmd_phys);
+ make_page_readonly(pmd);
+ xen_pmd_pin(pmd_phys);
+ set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
+
+ for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
+ unsigned long pte_phys;
+ pte_t *pte, *pte_save;
+
+ if (paddr >= end) {
+ for (; j < PTRS_PER_PMD; j++, pmd++)
+ set_pmd(pmd, __pmd(0));
+ break;
+ }
+ pte = alloc_low_page(&pte_phys);
+ pte_save = pte;
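+		/*
+		 * Addresses that fall inside the bootstrap page-table
+		 * area must be mapped read-only, or Xen will refuse to
+		 * pin the tables that reference them.
+		 */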
+ for (k = 0; k < PTRS_PER_PTE; pte++, k++, paddr += PTE_SIZE) {
+ if (paddr < (table_start << PAGE_SHIFT)
+ + tables_reserved)
+ {
+ __set_pte(pte,
+ __pte(paddr | (_KERNPG_TABLE & ~_PAGE_RW)));
+ continue;
+ }
+ if (paddr >= end) {
+ for (; k < PTRS_PER_PTE; k++, pte++)
+ __set_pte(pte, __pte(0));
+ break;
+ }
+ __set_pte(pte, __pte(paddr | _KERNPG_TABLE));
+ }
+ pte = pte_save;
+ make_page_readonly(pte);
+ xen_pte_pin(pte_phys);
+ set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
+ }
+ }
+ __flush_tlb();
+}
+
+static void __init find_early_table_space(unsigned long end)
+{
+ unsigned long puds, pmds, ptes;
+
+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+ ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
+
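+	/* Each table entry is 8 bytes; round each level up to whole pages. */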
+ tables_reserved = round_up(puds*8, PAGE_SIZE) + round_up(pmds * 8, PAGE_SIZE)
+ + round_up(ptes * 8, PAGE_SIZE);
+ table_start = start_pfn;
+ table_end = table_start;
+}
+
+/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ This runs before bootmem is initialized and gets pages directly from the
+ physical memory. To access them they are temporarily mapped. */
+void __init init_memory_mapping(unsigned long start, unsigned long end)
+{
+ unsigned long next;
+
+ Dprintk("init_memory_mapping\n");
+
+ find_early_table_space(end);
+
+ start = (unsigned long)__va(start);
+ end = (unsigned long)__va(end);
+
+ for (; start < end; start = next) {
+ unsigned long pud_phys;
+ pud_t *pud = alloc_low_page(&pud_phys);
+ make_page_readonly(pud);
+ xen_pud_pin(pud_phys);
+ next = start + PGDIR_SIZE;
+ if (next > end)
+ next = end;
+ phys_pud_init(pud, __pa(start), __pa(next));
+ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
+ }
+
+	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n", end,
+ table_start<<PAGE_SHIFT,
+ table_end<<PAGE_SHIFT);
+
+// start_pfn = table_end;
+
+ /*
+ * TBD: Need to calculate at runtime
+ */
+ start_pfn = (8*0x100000) >> PAGE_SHIFT;
+
+ __flush_tlb_all();
+ init_mapping_done = 1;
+}
+
+extern struct x8664_pda cpu_pda[NR_CPUS];
+
+void zap_low_mappings(void)
+{
+ /* this is not required for Xen */
+#if 0
+ swap_low_mappings();
+#endif
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void __init paging_init(void)
+{
+ int i;
+
+ {
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ /* unsigned int max_dma; */
+ /* max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; */
+ /* if (end_pfn < max_dma) */
+ zones_size[ZONE_DMA] = end_pfn;
+#if 0
+ else {
+ zones_size[ZONE_DMA] = max_dma;
+ zones_size[ZONE_NORMAL] = end_pfn - max_dma;
+ }
+#endif
+ free_area_init(zones_size);
+ }
+
+ set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+
+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ /* Setup mapping of lower 1st MB */
+ for (i = 0; i < NR_FIX_ISAMAPS; i++)
+ if (xen_start_info.flags & SIF_PRIVILEGED)
+ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
+ else
+ __set_fixmap(FIX_ISAMAP_BEGIN - i,
+ virt_to_machine(empty_zero_page),
+ PAGE_KERNEL_RO);
+#endif
+
+}
+#endif
+
+/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
+ from the CPU leading to inconsistent cache lines. address and size
+ must be aligned to 2MB boundaries.
+ Does nothing when the mapping doesn't exist. */
+void __init clear_kernel_mapping(unsigned long address, unsigned long size)
+{
+ unsigned long end = address + size;
+
+ BUG_ON(address & ~LARGE_PAGE_MASK);
+ BUG_ON(size & ~LARGE_PAGE_MASK);
+
+ for (; address < end; address += LARGE_PAGE_SIZE) {
+ pgd_t *pgd = pgd_offset_k(address);
+ pud_t *pud;
+ pmd_t *pmd;
+ if (pgd_none(*pgd))
+ continue;
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud))
+ continue;
+ pmd = pmd_offset(pud, address);
+ if (!pmd || pmd_none(*pmd))
+ continue;
+ if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
+ /* Could handle this, but it should not happen currently. */
+ printk(KERN_ERR
+ "clear_kernel_mapping: mapping has been split. will leak memory\n");
+ pmd_ERROR(*pmd);
+ }
+ set_pmd(pmd, __pmd(0));
+ }
+ __flush_tlb_all();
+}
+
+static inline int page_is_ram (unsigned long pagenr)
+{
+ if (pagenr < start_pfn || pagenr >= end_pfn)
+ return 0;
+
+ return 1;
+}
+
+extern int swiotlb_force;
+
+static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
+ kcore_vsyscall;
+
+void __init mem_init(void)
+{
+ int codesize, reservedpages, datasize, initsize;
+ int tmp;
+
+#ifdef CONFIG_SWIOTLB
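+	/*
+	 * Use the software IOTLB when memory extends beyond the 32-bit
+	 * DMA limit and no hardware IOMMU aperture is available.
+	 */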
+ if (swiotlb_force)
+ swiotlb = 1;
+ if (!iommu_aperture &&
+ (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
+ swiotlb = 1;
+ if (swiotlb)
+ swiotlb_init();
+#endif
+
+ /* How many end-of-memory variables you have, grandma! */
+ max_low_pfn = end_pfn;
+ max_pfn = end_pfn;
+ num_physpages = end_pfn;
+ high_memory = (void *) __va(end_pfn * PAGE_SIZE);
+
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+ reservedpages = 0;
+
+ /* this will put all low memory onto the freelists */
+#ifdef CONFIG_DISCONTIGMEM
+ totalram_pages += numa_free_all_bootmem();
+ tmp = 0;
+ /* should count reserved pages here for all nodes */
+#else
+ max_mapnr = end_pfn;
+ if (!mem_map) BUG();
+
+ totalram_pages += free_all_bootmem();
+
+ for (tmp = 0; tmp < end_pfn; tmp++)
+ /*
+ * Only count reserved RAM pages
+ */
+ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
+ reservedpages++;
+#endif
+
+ after_bootmem = 1;
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ /* Register memory areas for /proc/kcore */
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
+ VMALLOC_END-VMALLOC_START);
+ kclist_add(&kcore_kernel, &_stext, _end - _stext);
+ kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
+ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
+ VSYSCALL_END - VSYSCALL_START);
+
+ printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ end_pfn << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10);
+
+ /*
+ * Subtle. SMP is doing its boot stuff late (because it has to
+ * fork idle threads) - but it also needs low mappings for the
+ * protected-mode entry to work. We zap these entries only after
+ * the WP-bit has been tested.
+ */
+#ifndef CONFIG_SMP
+ zap_low_mappings();
+#endif
+}
+
+extern char __initdata_begin[], __initdata_end[];
+
+void free_initmem(void)
+{
+#ifdef __DO_LATER__
+ /*
+ * Some pages can be pinned, but some are not. Unpinning such pages
+ * triggers BUG().
+ */
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
+ memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
+ xen_pte_unpin(__pa(addr));
+ make_page_writable(__va(__pa(addr)));
+ /*
+ * Make pages from __PAGE_OFFSET address as well
+ */
+ make_page_writable((void *)addr);
+ free_page(addr);
+ totalram_pages++;
+ }
+ memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
+ printk ("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10);
+#endif
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start < (unsigned long)&_end)
+ return;
+ printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
+ free_page(start);
+ totalram_pages++;
+ }
+}
+#endif
+
+void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
+{
+ /* Should check here against the e820 map to avoid double free */
+#ifdef CONFIG_DISCONTIGMEM
+ int nid = phys_to_nid(phys);
+ reserve_bootmem_node(NODE_DATA(nid), phys, len);
+#else
+ reserve_bootmem(phys, len);
+#endif
+}
+
+int kern_addr_valid(unsigned long addr)
+{
+ unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
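+	/* Reject non-canonical addresses: bits above the virtual-address
+	   width must be all zeroes or all ones. */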
+ if (above != 0 && above != -1UL)
+ return 0;
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd))
+ return 0;
+
+ pud = pud_offset_k(addr);
+ if (pud_none(*pud))
+ return 0;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return 0;
+ if (pmd_large(*pmd))
+ return pfn_valid(pmd_pfn(*pmd));
+
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_none(*pte))
+ return 0;
+ return pfn_valid(pte_pfn(*pte));
+}
+
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+
+extern int exception_trace, page_fault_trace;
+
+static ctl_table debug_table2[] = {
+ { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
+ proc_dointvec },
+#ifdef CONFIG_CHECKING
+ { 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
+ proc_dointvec },
+#endif
+ { 0, }
+};
+
+static ctl_table debug_root_table2[] = {
+ { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
+ .child = debug_table2 },
+ { 0 },
+};
+
+static __init int x8664_sysctl_init(void)
+{
+ register_sysctl_table(debug_root_table2, 1);
+ return 0;
+}
+__initcall(x8664_sysctl_init);
+#endif
+
+/* Pseudo VMAs to allow ptrace access for the vsyscall pages. x86-64 has two
+   different ones: one for 32bit and one for 64bit. Use the appropriate
+   one for the target task. */
+
+static struct vm_area_struct gate_vma = {
+ .vm_start = VSYSCALL_START,
+ .vm_end = VSYSCALL_END,
+ .vm_page_prot = PAGE_READONLY
+};
+
+static struct vm_area_struct gate32_vma = {
+ .vm_start = VSYSCALL32_BASE,
+ .vm_end = VSYSCALL32_END,
+ .vm_page_prot = PAGE_READONLY
+};
+
+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+{
+#ifdef CONFIG_IA32_EMULATION
+ if (test_tsk_thread_flag(tsk, TIF_IA32)) {
+ /* lookup code assumes the pages are present. set them up
+ now */
+ if (__map_syscall32(tsk->mm, VSYSCALL32_BASE) < 0)
+ return NULL;
+ return &gate32_vma;
+ }
+#endif
+ return &gate_vma;
+}
+
+int in_gate_area(struct task_struct *task, unsigned long addr)
+{
+ struct vm_area_struct *vma = get_gate_vma(task);
+ return (addr >= vma->vm_start) && (addr < vma->vm_end);
+}
+
+/* Use this when you have no reliable task/vma, typically from interrupt
+ * context. It is less reliable than using the task's vma and may give
+ * false positives.
+ */
+int in_gate_area_no_task(unsigned long addr)
+{
+ return (((addr >= VSYSCALL_START) && (addr < VSYSCALL_END)) ||
+ ((addr >= VSYSCALL32_BASE) && (addr < VSYSCALL32_END)));
+}
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/ioremap.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/ioremap.c
new file mode 100644
index 0000000000..3714880f72
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/ioremap.c
@@ -0,0 +1,466 @@
+/*
+ * arch/x86_64/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/fixmap.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+/*
+ * Reuse arch/xen/i386/mm/ioremap.c. Need to merge later.
+ */
+#ifndef CONFIG_XEN_PHYSDEV_ACCESS
+
+void * __ioremap(unsigned long phys_addr, unsigned long size,
+ unsigned long flags)
+{
+ return NULL;
+}
+
+void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+ return NULL;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+}
+
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{
+ return NULL;
+}
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{
+}
+
+#else
+
+#if defined(__i386__)
+/*
+ * Does @address reside within a non-highmem page that is local to this virtual
+ * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
+ * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
+ * why this works.
+ */
+static inline int is_local_lowmem(unsigned long address)
+{
+ extern unsigned long max_low_pfn;
+ unsigned long mfn = address >> PAGE_SHIFT;
+ unsigned long pfn = mfn_to_pfn(mfn);
+ return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
+}
+#elif defined(__x86_64__)
+/*
+ * No lowmem check is implemented for x86_64 yet: report every address
+ * as non-local, so __ioremap() never treats it as guest RAM.
+ */
+static inline int is_local_lowmem(unsigned long address)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+ void __iomem * addr;
+ struct vm_struct * area;
+ unsigned long offset, last_addr;
+ domid_t domid = DOMID_IO;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /*
+ * Don't remap the low PCI/ISA area, it's always mapped..
+ */
+ if (phys_addr >= 0x0 && last_addr < 0x100000)
+ return isa_bus_to_virt(phys_addr);
+#endif
+
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+ if (is_local_lowmem(phys_addr)) {
+ char *t_addr, *t_end;
+ struct page *page;
+
+ t_addr = bus_to_virt(phys_addr);
+ t_end = t_addr + (size - 1);
+
+ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+ if(!PageReserved(page))
+ return NULL;
+
+ domid = DOMID_LOCAL;
+ }
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr+1) - phys_addr;
+
+ /*
+ * Ok, go for it..
+ */
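+	/*
+	 * The pgprot flags are stashed in the high bits of the vm_struct
+	 * flags word so that iounmap() can tell whether the page
+	 * attributes need restoring (see the (p->flags >> 20) test there).
+	 */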
+ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
+ if (!area)
+ return NULL;
+ area->phys_addr = phys_addr;
+ addr = (void __iomem *) area->addr;
+ if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
+ size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
+ _PAGE_DIRTY | _PAGE_ACCESSED
+#if defined(__x86_64__)
+ | _PAGE_USER
+#endif
+ | flags), domid)) {
+ vunmap((void __force *) addr);
+ return NULL;
+ }
+ return (void __iomem *) (offset + (char __iomem *)addr);
+}
+
+
+/**
+ * ioremap_nocache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * on the CPU as well as honouring existing caching rules from things like
+ * the PCI bus. Note that there are other caches and buffers on many
+ * busses. In particular driver authors should read up on PCI writes.
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable:
+ *
+ * Must be freed with iounmap.
+ */
+
+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+ unsigned long last_addr;
+ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
+ if (!p)
+ return p;
+
+ /* Guaranteed to be > phys_addr, as per __ioremap() */
+ last_addr = phys_addr + size - 1;
+
+ if (is_local_lowmem(last_addr)) {
+ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
+ unsigned long npages;
+
+ phys_addr &= PAGE_MASK;
+
+ /* This might overflow and become zero.. */
+ last_addr = PAGE_ALIGN(last_addr);
+
+ /* .. but that's ok, because modulo-2**n arithmetic will make
+ * the page-aligned "last - first" come out right.
+ */
+ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
+
+ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
+ iounmap(p);
+ p = NULL;
+ }
+ global_flush_tlb();
+ }
+
+ return p;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+ struct vm_struct *p;
+ if ((void __force *) addr <= high_memory)
+ return;
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
+ return;
+#endif
+ p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+ if (!p) {
+ printk("__iounmap: bad address %p\n", addr);
+ return;
+ }
+
+ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
+ /* p->size includes the guard page, but cpa doesn't like that */
+ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
+ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
+ PAGE_KERNEL);
+ global_flush_tlb();
+ }
+ kfree(p);
+}
+
+#if defined(__i386__)
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{
+ unsigned long offset, last_addr;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /*
+ * Don't remap the low PCI/ISA area, it's always mapped..
+ */
+ if (phys_addr >= 0x0 && last_addr < 0x100000)
+ return isa_bus_to_virt(phys_addr);
+#endif
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+ /*
+ * Mappings have to fit in the FIX_BTMAP area.
+ */
+ nrpages = size >> PAGE_SHIFT;
+ if (nrpages > NR_FIX_BTMAPS)
+ return NULL;
+
+ /*
+ * Ok, go for it..
+ */
+ idx = FIX_BTMAP_BEGIN;
+ while (nrpages > 0) {
+ set_fixmap(idx, phys_addr);
+ phys_addr += PAGE_SIZE;
+ --idx;
+ --nrpages;
+ }
+ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
+}
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{
+ unsigned long virt_addr;
+ unsigned long offset;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+
+ virt_addr = (unsigned long)addr;
+ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
+ return;
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
+ return;
+#endif
+ offset = virt_addr & ~PAGE_MASK;
+ nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
+
+ idx = FIX_BTMAP_BEGIN;
+ while (nrpages > 0) {
+ clear_fixmap(idx);
+ --idx;
+ --nrpages;
+ }
+}
+#endif /* defined(__i386__) */
+
+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
+
+/* These hacky macros avoid phys->machine translations. */
+#define __direct_pte(x) ((pte_t) { (x) } )
+#define __direct_mk_pte(page_nr,pgprot) \
+ __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define direct_mk_pte_phys(physpage, pgprot) \
+ __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
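+/*
+ * Example (sketch): direct_mk_pte_phys(maddr, PAGE_KERNEL) drops the
+ * machine frame number maddr >> PAGE_SHIFT straight into the PTE,
+ * whereas the usual mk_pte() path would first translate a
+ * pseudo-physical frame through the phys-to-machine table.
+ */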
+static inline void direct_remap_area_pte(pte_t *pte,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t **v)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ if (address >= end)
+ BUG();
+
+ do {
+ (*v)->ptr = virt_to_machine(pte);
+ (*v)++;
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int direct_remap_area_pmd(struct mm_struct *mm,
+ pmd_t *pmd,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t **v)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ if (address >= end)
+ BUG();
+ do {
+ pte_t *pte = (mm == &init_mm) ?
+ pte_alloc_kernel(mm, pmd, address) :
+ pte_alloc_map(mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ direct_remap_area_pte(pte, address, end - address, v);
+ pte_unmap(pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+int __direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t *v)
+{
+ pgd_t * dir;
+ unsigned long end = address + size;
+ int error;
+
+#if defined(__i386__)
+ dir = pgd_offset(mm, address);
+#elif defined(__x86_64__)
+ dir = (mm == &init_mm) ?
+ pgd_offset_k(address):
+ pgd_offset(mm, address);
+#endif
+ if (address >= end)
+ BUG();
+ spin_lock(&mm->page_table_lock);
+ do {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ error = -ENOMEM;
+ pud = pud_alloc(mm, dir, address);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(mm, pud, address);
+ if (!pmd)
+ break;
+ error = direct_remap_area_pmd(mm, pmd, address, end - address, &v);
+ if (error)
+ break;
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+
+ } while (address && (address < end));
+ spin_unlock(&mm->page_table_lock);
+ return error;
+}
+
+
+int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid)
+{
+ int i;
+ unsigned long start_address;
+#define MAX_DIRECTMAP_MMU_QUEUE 130
+ mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u;
+
+ start_address = address;
+
+ flush_cache_all();
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
+ /* Fill in the PTE pointers. */
+ __direct_remap_area_pages(mm,
+ start_address,
+ address-start_address,
+ u);
+
+ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
+ return -EFAULT;
+ v = u;
+ start_address = address;
+ }
+
+ /*
+ * Fill in the machine address: PTE ptr is done later by
+ * __direct_remap_area_pages().
+ */
+ v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
+
+ machine_addr += PAGE_SIZE;
+ address += PAGE_SIZE;
+ v++;
+ }
+
+ if (v != u) {
+ /* get the ptep's filled in */
+ __direct_remap_area_pages(mm,
+ start_address,
+ address-start_address,
+ u);
+ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
+ return -EFAULT;
+ }
+
+ flush_tlb_all();
+
+ return 0;
+}
+
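+/*
+ * Usage sketch (illustrative, not from this patch): mapping a single
+ * foreign frame at machine address 'maddr' into a userspace vma, in
+ * the style of the privcmd driver:
+ *
+ *	direct_remap_area_pages(vma->vm_mm, vma->vm_start, maddr,
+ *	                        PAGE_SIZE, vma->vm_page_prot, dom);
+ */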
+EXPORT_SYMBOL(direct_remap_area_pages);
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/pageattr.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/pageattr.c
new file mode 100644
index 0000000000..63e9a38a17
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/pageattr.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ * Thanks to Ben LaHaise for precious feedback.
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+
+void pte_free(struct page *pte)
+{
+ pte_t *ptep;
+
+ ptep = pfn_to_kaddr(page_to_pfn(pte));
+
+ xen_pte_unpin(__pa(ptep));
+ make_page_writable(ptep);
+ __free_page(pte);
+}
+
+static inline pte_t *lookup_address(unsigned long address)
+{
+ pgd_t *pgd = pgd_offset_k(address);
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ if (pgd_none(*pgd))
+ return NULL;
+ pud = pud_offset(pgd, address);
+ if (!pud_present(*pud))
+ return NULL;
+ pmd = pmd_offset(pud, address);
+ if (!pmd_present(*pmd))
+ return NULL;
+ if (pmd_large(*pmd))
+ return (pte_t *)pmd;
+ pte = pte_offset_kernel(pmd, address);
+ if (pte && !pte_present(*pte))
+ pte = NULL;
+ return pte;
+}
+
+static struct page *split_large_page(unsigned long address, pgprot_t prot,
+ pgprot_t ref_prot)
+{
+ int i;
+ unsigned long addr;
+ struct page *base = alloc_pages(GFP_KERNEL, 0);
+ pte_t *pbase;
+ if (!base)
+ return NULL;
+ address = __pa(address);
+ addr = address & LARGE_PAGE_MASK;
+ pbase = (pte_t *)page_address(base);
+ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
+ pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
+ addr == address ? prot : ref_prot);
+ }
+ return base;
+}
+
+
+static void flush_kernel_map(void *address)
+{
+ if (0 && address && cpu_has_clflush) {
+ /* clflush path intentionally disabled: is it worth it? */
+ int i;
+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ asm volatile("clflush (%0)" :: "r" (address + i));
+ } else
+ asm volatile("wbinvd":::"memory");
+ if (address)
+ __flush_tlb_one((unsigned long) address);
+ else
+ __flush_tlb_all();
+}
+
+
+static inline void flush_map(unsigned long address)
+{
+ on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
+}
+
+struct deferred_page {
+ struct deferred_page *next;
+ struct page *fpage;
+ unsigned long address;
+};
+static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */
+
+static inline void save_page(unsigned long address, struct page *fpage)
+{
+ struct deferred_page *df;
+ df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
+ if (!df) {
+ flush_map(address);
+ __free_page(fpage);
+ } else {
+ df->next = df_list;
+ df->fpage = fpage;
+ df->address = address;
+ df_list = df;
+ }
+}
+
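+/*
+ * Sketch of the deferral: each save_page() call pushes onto df_list,
+ * so after splitting several large pages the list is a LIFO chain of
+ * deferred frames; global_flush_tlb() below drains it, flushing once
+ * and then freeing every deferred page.
+ */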
+/*
+ * No more special protections in this 2/4MB area - revert to a
+ * large page again.
+ */
+static void revert_page(unsigned long address, pgprot_t ref_prot)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t large_pte;
+
+ pgd = pgd_offset_k(address);
+ BUG_ON(pgd_none(*pgd));
+ pud = pud_offset(pgd,address);
+ BUG_ON(pud_none(*pud));
+ pmd = pmd_offset(pud, address);
+ BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
+ pgprot_val(ref_prot) |= _PAGE_PSE;
+ large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
+ set_pte((pte_t *)pmd, large_pte);
+}
+
+static int
+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
+ pgprot_t ref_prot)
+{
+ pte_t *kpte;
+ struct page *kpte_page;
+ unsigned kpte_flags;
+ kpte = lookup_address(address);
+ if (!kpte) return 0;
+ kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
+ kpte_flags = pte_val(*kpte);
+ if (pgprot_val(prot) != pgprot_val(ref_prot)) {
+ if ((kpte_flags & _PAGE_PSE) == 0) {
+ set_pte(kpte, pfn_pte(pfn, prot));
+ } else {
+ /*
+ * split_large_page will take the reference for this change_page_attr
+ * on the split page.
+ */
+ struct page *split = split_large_page(address, prot, ref_prot);
+ if (!split)
+ return -ENOMEM;
+ set_pte(kpte,mk_pte(split, ref_prot));
+ kpte_page = split;
+ }
+ get_page(kpte_page);
+ } else if ((kpte_flags & _PAGE_PSE) == 0) {
+ set_pte(kpte, pfn_pte(pfn, ref_prot));
+ __put_page(kpte_page);
+ } else
+ BUG();
+
+ /* on x86-64, the direct mapping set up at boot does not use 4k pages */
+ BUG_ON(PageReserved(kpte_page));
+
+ switch (page_count(kpte_page)) {
+ case 1:
+ save_page(address, kpte_page);
+ revert_page(address, ref_prot);
+ break;
+ case 0:
+ BUG(); /* memleak and failed 2M page regeneration */
+ }
+ return 0;
+}
+
+/*
+ * Change the page attributes of a page in the linear mapping.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * in-kernel linear mapping too.
+ *
+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
+ * This function only deals with the kernel linear map.
+ *
+ * Caller must call global_flush_tlb() after this.
+ */
+int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
+{
+ int err = 0;
+ int i;
+
+ down_write(&init_mm.mmap_sem);
+ for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
+ unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+
+ err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
+ if (err)
+ break;
+ /* Handle kernel mapping too which aliases part of the
+ * lowmem */
+ if (__pa(address) < KERNEL_TEXT_SIZE) {
+ unsigned long addr2;
+ pgprot_t prot2 = prot;
+ addr2 = __START_KERNEL_map + __pa(address);
+ pgprot_val(prot2) &= ~_PAGE_NX;
+ err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
+ }
+ }
+ up_write(&init_mm.mmap_sem);
+ return err;
+}
+
+/* Don't call this for MMIO areas that may not have a mem_map entry */
+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ return change_page_attr_addr(addr, numpages, prot);
+}
+
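+/*
+ * Typical calling pattern (sketch): remap one page uncached, then pick
+ * up the deferred flush, mirroring what ioremap_nocache() does above:
+ *
+ *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+ *	global_flush_tlb();
+ */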
+void global_flush_tlb(void)
+{
+ struct deferred_page *df, *next_df;
+
+ down_read(&init_mm.mmap_sem);
+ df = xchg(&df_list, NULL);
+ up_read(&init_mm.mmap_sem);
+ if (!df)
+ return;
+ flush_map((df && !df->next) ? df->address : 0);
+ for (; df; df = next_df) {
+ next_df = df->next;
+ if (df->fpage)
+ __free_page(df->fpage);
+ kfree(df);
+ }
+}
+
+EXPORT_SYMBOL(change_page_attr);
+EXPORT_SYMBOL(global_flush_tlb);
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/pci/Makefile b/linux-2.6.11-xen-sparse/arch/xen/x86_64/pci/Makefile
new file mode 100644
index 0000000000..47dbc45daa
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/pci/Makefile
@@ -0,0 +1,41 @@
+#
+# Makefile for X86_64 specific PCI routines
+#
+# Reuse the i386 PCI subsystem
+#
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+CFLAGS += -Iarch/$(XENARCH)/pci
+
+CFLAGS += -Iarch/i386/pci
+
+c-obj-y := i386.o
+c-obj-y += fixup.o
+c-obj-$(CONFIG_ACPI_PCI) += acpi.o
+c-obj-y += legacy.o common.o
+c-obj-$(CONFIG_PCI_DIRECT)+= direct.o
+c-xen-obj-y += irq.o
+# mmconfig has a 64-bit-specific version
+c-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
+
+c-obj-$(CONFIG_NUMA) += k8-bus.o
+
+c-direct-y += ../../i386/pci/direct.o
+c-acpi-y += ../../i386/pci/acpi.o
+c-legacy-y += ../../i386/pci/legacy.o
+c-irq-y += ../../i386/pci/irq.o
+c-common-y += ../../i386/pci/common.o
+c-fixup-y += ../../i386/pci/fixup.o
+c-i386-y += ../../i386/pci/i386.o
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-xen-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/xen/i386/pci/$(notdir $@) $@
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
+
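+# Example: building irq.o via c-xen-obj-y first materialises $(obj)/irq.c
+# as a symlink to $(srctree)/arch/xen/i386/pci/irq.c (first rule above),
+# while the c-obj-y sources link back to arch/i386/pci (second rule).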
+obj-y += $(c-obj-y)
+obj-y += $(c-xen-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/pci/Makefile-BUS b/linux-2.6.11-xen-sparse/arch/xen/x86_64/pci/Makefile-BUS
new file mode 100644
index 0000000000..291985f0d2
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/pci/Makefile-BUS
@@ -0,0 +1,22 @@
+#
+# Makefile for X86_64 specific PCI routines
+#
+# Reuse the i386 PCI subsystem
+#
+CFLAGS += -I arch/i386/pci
+
+obj-y := i386.o
+obj-$(CONFIG_PCI_DIRECT)+= direct.o
+obj-y += fixup.o
+obj-$(CONFIG_ACPI_PCI) += acpi.o
+obj-y += legacy.o irq.o common.o
+# mmconfig has a 64-bit-specific version
+obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
+
+direct-y += ../../i386/pci/direct.o
+acpi-y += ../../i386/pci/acpi.o
+legacy-y += ../../i386/pci/legacy.o
+irq-y += ../../i386/pci/irq.o
+common-y += ../../i386/pci/common.o
+fixup-y += ../../i386/pci/fixup.o
+i386-y += ../../i386/pci/i386.o
diff --git a/linux-2.6.11-xen-sparse/drivers/acpi/tables.c b/linux-2.6.11-xen-sparse/drivers/acpi/tables.c
new file mode 100644
index 0000000000..79e956b832
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/acpi/tables.c
@@ -0,0 +1,610 @@
+/*
+ * acpi_tables.c - ACPI Boot-Time Table Parsing
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/bootmem.h>
+
+#define PREFIX "ACPI: "
+
+#define ACPI_MAX_TABLES 256
+
+static char *acpi_table_signatures[ACPI_TABLE_COUNT] = {
+ [ACPI_TABLE_UNKNOWN] = "????",
+ [ACPI_APIC] = "APIC",
+ [ACPI_BOOT] = "BOOT",
+ [ACPI_DBGP] = "DBGP",
+ [ACPI_DSDT] = "DSDT",
+ [ACPI_ECDT] = "ECDT",
+ [ACPI_ETDT] = "ETDT",
+ [ACPI_FADT] = "FACP",
+ [ACPI_FACS] = "FACS",
+ [ACPI_OEMX] = "OEM",
+ [ACPI_PSDT] = "PSDT",
+ [ACPI_SBST] = "SBST",
+ [ACPI_SLIT] = "SLIT",
+ [ACPI_SPCR] = "SPCR",
+ [ACPI_SRAT] = "SRAT",
+ [ACPI_SSDT] = "SSDT",
+ [ACPI_SPMI] = "SPMI",
+ [ACPI_HPET] = "HPET",
+ [ACPI_MCFG] = "MCFG",
+};
+
+static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
+static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" };
+
+/* System Description Table (RSDT/XSDT) */
+struct acpi_table_sdt {
+ unsigned long pa;
+ enum acpi_table_id id;
+ unsigned long size;
+} __attribute__ ((packed));
+
+static unsigned long sdt_pa; /* Physical Address */
+static unsigned long sdt_count; /* Table count */
+
+static struct acpi_table_sdt sdt_entry[ACPI_MAX_TABLES];
+
+void
+acpi_table_print (
+ struct acpi_table_header *header,
+ unsigned long phys_addr)
+{
+ char *name = NULL;
+
+ if (!header)
+ return;
+
+ /* Some table signatures aren't good table names */
+
+ if (!strncmp((char *) &header->signature,
+ acpi_table_signatures[ACPI_APIC],
+ sizeof(header->signature))) {
+ name = "MADT";
+ }
+ else if (!strncmp((char *) &header->signature,
+ acpi_table_signatures[ACPI_FADT],
+ sizeof(header->signature))) {
+ name = "FADT";
+ }
+ else
+ name = header->signature;
+
+ printk(KERN_DEBUG PREFIX "%.4s (v%3.3d %6.6s %8.8s 0x%08x %.4s 0x%08x) @ 0x%p\n",
+ name, header->revision, header->oem_id,
+ header->oem_table_id, header->oem_revision,
+ header->asl_compiler_id, header->asl_compiler_revision,
+ (void *) phys_addr);
+}
+
+
+void
+acpi_table_print_madt_entry (
+ acpi_table_entry_header *header)
+{
+ if (!header)
+ return;
+
+ switch (header->type) {
+
+ case ACPI_MADT_LAPIC:
+ {
+ struct acpi_table_lapic *p =
+ (struct acpi_table_lapic*) header;
+ printk(KERN_INFO PREFIX "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
+ p->acpi_id, p->id, p->flags.enabled?"enabled":"disabled");
+ }
+ break;
+
+ case ACPI_MADT_IOAPIC:
+ {
+ struct acpi_table_ioapic *p =
+ (struct acpi_table_ioapic*) header;
+ printk(KERN_INFO PREFIX "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
+ p->id, p->address, p->global_irq_base);
+ }
+ break;
+
+ case ACPI_MADT_INT_SRC_OVR:
+ {
+ struct acpi_table_int_src_ovr *p =
+ (struct acpi_table_int_src_ovr*) header;
+ printk(KERN_INFO PREFIX "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
+ p->bus, p->bus_irq, p->global_irq,
+ mps_inti_flags_polarity[p->flags.polarity],
+ mps_inti_flags_trigger[p->flags.trigger]);
+ if(p->flags.reserved)
+ printk(KERN_INFO PREFIX "INT_SRC_OVR unexpected reserved flags: 0x%x\n",
+ p->flags.reserved);
+
+ }
+ break;
+
+ case ACPI_MADT_NMI_SRC:
+ {
+ struct acpi_table_nmi_src *p =
+ (struct acpi_table_nmi_src*) header;
+ printk(KERN_INFO PREFIX "NMI_SRC (%s %s global_irq %d)\n",
+ mps_inti_flags_polarity[p->flags.polarity],
+ mps_inti_flags_trigger[p->flags.trigger], p->global_irq);
+ }
+ break;
+
+ case ACPI_MADT_LAPIC_NMI:
+ {
+ struct acpi_table_lapic_nmi *p =
+ (struct acpi_table_lapic_nmi*) header;
+ printk(KERN_INFO PREFIX "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
+ p->acpi_id,
+ mps_inti_flags_polarity[p->flags.polarity],
+ mps_inti_flags_trigger[p->flags.trigger], p->lint);
+ }
+ break;
+
+ case ACPI_MADT_LAPIC_ADDR_OVR:
+ {
+ struct acpi_table_lapic_addr_ovr *p =
+ (struct acpi_table_lapic_addr_ovr*) header;
+ printk(KERN_INFO PREFIX "LAPIC_ADDR_OVR (address[%p])\n",
+ (void *) (unsigned long) p->address);
+ }
+ break;
+
+ case ACPI_MADT_IOSAPIC:
+ {
+ struct acpi_table_iosapic *p =
+ (struct acpi_table_iosapic*) header;
+ printk(KERN_INFO PREFIX "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
+ p->id, (void *) (unsigned long) p->address, p->global_irq_base);
+ }
+ break;
+
+ case ACPI_MADT_LSAPIC:
+ {
+ struct acpi_table_lsapic *p =
+ (struct acpi_table_lsapic*) header;
+ printk(KERN_INFO PREFIX "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
+ p->acpi_id, p->id, p->eid, p->flags.enabled?"enabled":"disabled");
+ }
+ break;
+
+ case ACPI_MADT_PLAT_INT_SRC:
+ {
+ struct acpi_table_plat_int_src *p =
+ (struct acpi_table_plat_int_src*) header;
+ printk(KERN_INFO PREFIX "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
+ mps_inti_flags_polarity[p->flags.polarity],
+ mps_inti_flags_trigger[p->flags.trigger],
+ p->type, p->id, p->eid, p->iosapic_vector, p->global_irq);
+ }
+ break;
+
+ default:
+ printk(KERN_WARNING PREFIX "Found unsupported MADT entry (type = 0x%x)\n",
+ header->type);
+ break;
+ }
+}
+
+
+static int
+acpi_table_compute_checksum (
+ void *table_pointer,
+ unsigned long length)
+{
+ u8 *p = (u8 *) table_pointer;
+ unsigned long remains = length;
+ unsigned long sum = 0;
+
+ if (!p || !length)
+ return -EINVAL;
+
+ while (remains--)
+ sum += *p++;
+
+ return (sum & 0xFF);
+}
+
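+/*
+ * Example: a conforming table's checksum byte is chosen so that all of
+ * its bytes sum to 0 mod 256, so a return of 0 means "valid" and any
+ * non-zero value marks the table as corrupt.
+ */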
+/*
+ * acpi_get_table_header_early()
+ * for acpi_blacklisted(), acpi_table_get_sdt()
+ */
+int __init
+acpi_get_table_header_early (
+ enum acpi_table_id id,
+ struct acpi_table_header **header)
+{
+ unsigned int i;
+ enum acpi_table_id temp_id;
+
+ /* DSDT is different from the rest */
+ if (id == ACPI_DSDT)
+ temp_id = ACPI_FADT;
+ else
+ temp_id = id;
+
+ /* Locate the table. */
+
+ for (i = 0; i < sdt_count; i++) {
+ if (sdt_entry[i].id != temp_id)
+ continue;
+ *header = (void *)
+ __acpi_map_table(sdt_entry[i].pa, sdt_entry[i].size);
+ if (!*header) {
+ printk(KERN_WARNING PREFIX "Unable to map %s\n",
+ acpi_table_signatures[temp_id]);
+ return -ENODEV;
+ }
+ break;
+ }
+
+ if (!*header) {
+ printk(KERN_WARNING PREFIX "%s not present\n",
+ acpi_table_signatures[id]);
+ return -ENODEV;
+ }
+
+ /* Map the DSDT header via the pointer in the FADT */
+ if (id == ACPI_DSDT) {
+ struct fadt_descriptor_rev2 *fadt = (struct fadt_descriptor_rev2 *) *header;
+
+ if (fadt->revision == 3 && fadt->Xdsdt) {
+ *header = (void *) __acpi_map_table(fadt->Xdsdt,
+ sizeof(struct acpi_table_header));
+ } else if (fadt->V1_dsdt) {
+ *header = (void *) __acpi_map_table(fadt->V1_dsdt,
+ sizeof(struct acpi_table_header));
+ } else
+ *header = NULL;
+
+ if (!*header) {
+ printk(KERN_WARNING PREFIX "Unable to map DSDT\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+
+int __init
+acpi_table_parse_madt_family (
+ enum acpi_table_id id,
+ unsigned long madt_size,
+ int entry_id,
+ acpi_madt_entry_handler handler,
+ unsigned int max_entries)
+{
+ void *madt = NULL;
+ acpi_table_entry_header *entry;
+ unsigned int count = 0;
+ unsigned long madt_end;
+ unsigned int i;
+
+ if (!handler)
+ return -EINVAL;
+
+ /* Locate the MADT (if it exists). There should only be one. */
+
+ for (i = 0; i < sdt_count; i++) {
+ if (sdt_entry[i].id != id)
+ continue;
+ madt = (void *)
+ __acpi_map_table(sdt_entry[i].pa, sdt_entry[i].size);
+ if (!madt) {
+ printk(KERN_WARNING PREFIX "Unable to map %s\n",
+ acpi_table_signatures[id]);
+ return -ENODEV;
+ }
+ break;
+ }
+
+ if (!madt) {
+ printk(KERN_WARNING PREFIX "%s not present\n",
+ acpi_table_signatures[id]);
+ return -ENODEV;
+ }
+
+ madt_end = (unsigned long) madt + sdt_entry[i].size;
+
+ /* Parse all entries looking for a match. */
+
+ entry = (acpi_table_entry_header *)
+ ((unsigned long) madt + madt_size);
+
+ while (((unsigned long) entry) + sizeof(acpi_table_entry_header) < madt_end) {
+ if (entry->type == entry_id &&
+ (!max_entries || count++ < max_entries))
+ if (handler(entry, madt_end))
+ return -EINVAL;
+
+ entry = (acpi_table_entry_header *)
+ ((unsigned long) entry + entry->length);
+ }
+ if (max_entries && count > max_entries) {
+ printk(KERN_WARNING PREFIX "[%s:0x%02x] ignored %i entries of "
+ "%i found\n", acpi_table_signatures[id], entry_id,
+ count - max_entries, count);
+ }
+
+ return count;
+}
+
+
+int __init
+acpi_table_parse_madt (
+ enum acpi_madt_entry_id id,
+ acpi_madt_entry_handler handler,
+ unsigned int max_entries)
+{
+ return acpi_table_parse_madt_family(ACPI_APIC, sizeof(struct acpi_table_madt),
+ id, handler, max_entries);
+}
+
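+/*
+ * Example call (sketch): walk every local APIC entry in the MADT,
+ * handing each to a handler; the names follow the i386 boot code but
+ * are illustrative here:
+ *
+ *	acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic, MAX_APICS);
+ */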
+
+int __init
+acpi_table_parse (
+ enum acpi_table_id id,
+ acpi_table_handler handler)
+{
+ int count = 0;
+ unsigned int i = 0;
+
+ if (!handler)
+ return -EINVAL;
+
+ for (i = 0; i < sdt_count; i++) {
+ if (sdt_entry[i].id != id)
+ continue;
+ count++;
+ if (count == 1)
+ handler(sdt_entry[i].pa, sdt_entry[i].size);
+
+ else
+ printk(KERN_WARNING PREFIX "%d duplicate %s table ignored.\n",
+ count, acpi_table_signatures[id]);
+ }
+
+ return count;
+}
+
+
+static int __init
+acpi_table_get_sdt (
+ struct acpi_table_rsdp *rsdp)
+{
+ struct acpi_table_header *header = NULL;
+ unsigned int i, id = 0;
+
+ if (!rsdp)
+ return -EINVAL;
+
+ /* First check XSDT (but only on ACPI 2.0-compatible systems) */
+
+ if ((rsdp->revision >= 2) &&
+ (((struct acpi20_table_rsdp*)rsdp)->xsdt_address)) {
+
+ struct acpi_table_xsdt *mapped_xsdt = NULL;
+
+ sdt_pa = ((struct acpi20_table_rsdp*)rsdp)->xsdt_address;
+
+ /* map in just the header */
+ header = (struct acpi_table_header *)
+ __acpi_map_table(sdt_pa, sizeof(struct acpi_table_header));
+
+ if (!header) {
+ printk(KERN_WARNING PREFIX "Unable to map XSDT header\n");
+ return -ENODEV;
+ }
+
+ /* remap in the entire table before processing */
+ mapped_xsdt = (struct acpi_table_xsdt *)
+ __acpi_map_table(sdt_pa, header->length);
+ if (!mapped_xsdt) {
+ printk(KERN_WARNING PREFIX "Unable to map XSDT\n");
+ return -ENODEV;
+ }
+ header = &mapped_xsdt->header;
+
+ if (strncmp(header->signature, "XSDT", 4)) {
+ printk(KERN_WARNING PREFIX "XSDT signature incorrect\n");
+ return -ENODEV;
+ }
+
+ if (acpi_table_compute_checksum(header, header->length)) {
+ printk(KERN_WARNING PREFIX "Invalid XSDT checksum\n");
+ return -ENODEV;
+ }
+
+ sdt_count = (header->length - sizeof(struct acpi_table_header)) >> 3;
+ if (sdt_count > ACPI_MAX_TABLES) {
+ printk(KERN_WARNING PREFIX "Truncated %lu XSDT entries\n",
+ (sdt_count - ACPI_MAX_TABLES));
+ sdt_count = ACPI_MAX_TABLES;
+ }
+
+ for (i = 0; i < sdt_count; i++)
+ sdt_entry[i].pa = (unsigned long) mapped_xsdt->entry[i];
+ }
+
+ /* Then check RSDT */
+
+ else if (rsdp->rsdt_address) {
+
+ struct acpi_table_rsdt *mapped_rsdt = NULL;
+
+ sdt_pa = rsdp->rsdt_address;
+
+ /* map in just the header */
+ header = (struct acpi_table_header *)
+ __acpi_map_table(sdt_pa, sizeof(struct acpi_table_header));
+ if (!header) {
+ printk(KERN_WARNING PREFIX "Unable to map RSDT header\n");
+ return -ENODEV;
+ }
+
+ /* remap in the entire table before processing */
+ mapped_rsdt = (struct acpi_table_rsdt *)
+ __acpi_map_table(sdt_pa, header->length);
+ if (!mapped_rsdt) {
+ printk(KERN_WARNING PREFIX "Unable to map RSDT\n");
+ return -ENODEV;
+ }
+ header = &mapped_rsdt->header;
+
+ if (strncmp(header->signature, "RSDT", 4)) {
+ printk(KERN_WARNING PREFIX "RSDT signature incorrect\n");
+ return -ENODEV;
+ }
+
+ if (acpi_table_compute_checksum(header, header->length)) {
+ printk(KERN_WARNING PREFIX "Invalid RSDT checksum\n");
+ return -ENODEV;
+ }
+
+ sdt_count = (header->length - sizeof(struct acpi_table_header)) >> 2;
+ if (sdt_count > ACPI_MAX_TABLES) {
+ printk(KERN_WARNING PREFIX "Truncated %lu RSDT entries\n",
+ (sdt_count - ACPI_MAX_TABLES));
+ sdt_count = ACPI_MAX_TABLES;
+ }
+
+ for (i = 0; i < sdt_count; i++)
+ sdt_entry[i].pa = (unsigned long) mapped_rsdt->entry[i];
+ }
+
+ else {
+ printk(KERN_WARNING PREFIX "No System Description Table (RSDT/XSDT) specified in RSDP\n");
+ return -ENODEV;
+ }
+
+ acpi_table_print(header, sdt_pa);
+
+ for (i = 0; i < sdt_count; i++) {
+
+ /* map in just the header */
+ header = (struct acpi_table_header *)
+ __acpi_map_table(sdt_entry[i].pa,
+ sizeof(struct acpi_table_header));
+ if (!header)
+ continue;
+
+ /* remap in the entire table before processing */
+ header = (struct acpi_table_header *)
+ __acpi_map_table(sdt_entry[i].pa,
+ header->length);
+ if (!header)
+ continue;
+
+ acpi_table_print(header, sdt_entry[i].pa);
+
+ if (acpi_table_compute_checksum(header, header->length)) {
+ printk(KERN_WARNING " >>> ERROR: Invalid checksum\n");
+ continue;
+ }
+
+ sdt_entry[i].size = header->length;
+
+ for (id = 0; id < ACPI_TABLE_COUNT; id++) {
+ if (!strncmp((char *) &header->signature,
+ acpi_table_signatures[id],
+ sizeof(header->signature))) {
+ sdt_entry[i].id = id;
+ }
+ }
+ }
+
+ /*
+ * The DSDT is *not* in the RSDT (why not? no idea.) but we want
+ * to print its info, because this is what people usually blacklist
+ * against. Unfortunately, we don't know the phys_addr, so just
+ * print 0. Maybe no one will notice.
+ */
+ if(!acpi_get_table_header_early(ACPI_DSDT, &header))
+ acpi_table_print(header, 0);
+
+ return 0;
+}
+
+/*
+ * acpi_table_init()
+ *
+ * find RSDP, find and checksum SDT/XSDT.
+ * checksum all tables, print SDT/XSDT
+ *
+ * result: sdt_entry[] is initialized
+ */
+
+int __init
+acpi_table_init (void)
+{
+ struct acpi_table_rsdp *rsdp = NULL;
+ unsigned long rsdp_phys = 0;
+ int result = 0;
+
+ /* Locate and map the Root System Description Table (RSDP) */
+
+ rsdp_phys = acpi_find_rsdp();
+ if (!rsdp_phys) {
+ printk(KERN_ERR PREFIX "Unable to locate RSDP\n");
+ return -ENODEV;
+ }
+
+ rsdp = (struct acpi_table_rsdp *) (__fix_to_virt(FIX_ACPI_RSDP_PAGE) +
+ (rsdp_phys & ~PAGE_MASK));
+ if (!rsdp) {
+ printk(KERN_WARNING PREFIX "Unable to map RSDP\n");
+ return -ENODEV;
+ }
+
+ printk(KERN_DEBUG PREFIX "RSDP (v%3.3d %6.6s ) @ 0x%p\n",
+ rsdp->revision, rsdp->oem_id, (void *) rsdp_phys);
+
+ if (rsdp->revision < 2)
+ result = acpi_table_compute_checksum(rsdp, sizeof(struct acpi_table_rsdp));
+ else
+ result = acpi_table_compute_checksum(rsdp, ((struct acpi20_table_rsdp *)rsdp)->length);
+
+ if (result) {
+ printk(KERN_WARNING " >>> ERROR: Invalid checksum\n");
+ return -ENODEV;
+ }
+
+ /* Locate and map the System Description table (RSDT/XSDT) */
+
+ if (acpi_table_get_sdt(rsdp))
+ return -ENODEV;
+
+ return 0;
+}
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/Makefile b/linux-2.6.11-xen-sparse/drivers/xen/Makefile
index 0bfb5a50c3..50e13067a3 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/Makefile
+++ b/linux-2.6.11-xen-sparse/drivers/xen/Makefile
@@ -3,10 +3,11 @@
obj-y += console/
obj-y += evtchn/
obj-y += balloon/
-obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += privcmd/
+obj-y += privcmd/
obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
+obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/balloon/balloon.c b/linux-2.6.11-xen-sparse/drivers/xen/balloon/balloon.c
index ff82cd2f0a..649f64c402 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/balloon/balloon.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/balloon/balloon.c
@@ -139,24 +139,6 @@ static struct page *balloon_retrieve(void)
return page;
}
-static inline pte_t *get_ptep(unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- pgd = pgd_offset_k(addr);
- if ( pgd_none(*pgd) || pgd_bad(*pgd) ) BUG();
-
- pud = pud_offset(pgd, addr);
- if ( pud_none(*pud) || pud_bad(*pud) ) BUG();
-
- pmd = pmd_offset(pud, addr);
- if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
-
- return pte_offset_kernel(pmd, addr);
-}
-
static void balloon_alarm(unsigned long unused)
{
schedule_work(&balloon_worker);
@@ -220,14 +202,18 @@ static void balloon_process(void *unused)
/* Update P->M and M->P tables. */
phys_to_machine_mapping[pfn] = mfn_list[i];
- queue_machphys_update(mfn_list[i], pfn);
+ xen_machphys_update(mfn_list[i], pfn);
/* Link back into the page tables if it's not a highmem page. */
if ( pfn < max_low_pfn )
- queue_l1_entry_update(
- get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
- (mfn_list[i] << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
-
+ {
+ HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ __pte_ma((mfn_list[i] << PAGE_SHIFT) |
+ pgprot_val(PAGE_KERNEL)),
+ 0);
+ }
+
/* Finally, relinquish the memory back to the system allocator. */
ClearPageReserved(page);
set_page_count(page, 1);
@@ -259,7 +245,8 @@ static void balloon_process(void *unused)
{
v = phys_to_virt(pfn << PAGE_SHIFT);
scrub_pages(v, 1);
- queue_l1_entry_update(get_ptep((unsigned long)v), 0);
+ HYPERVISOR_update_va_mapping(
+ (unsigned long)v, __pte_ma(0), 0);
}
#ifdef CONFIG_XEN_SCRUB_PAGES
else
@@ -273,9 +260,7 @@ static void balloon_process(void *unused)
/* Ensure that ballooned highmem pages don't have cached mappings. */
kmap_flush_unused();
-
- /* Flush updates through and flush the TLB. */
- xen_tlb_flush();
+ flush_tlb_all();
/* No more mappings: invalidate pages in P2M and add to balloon. */
for ( i = 0; i < debt; i++ )
@@ -319,22 +304,17 @@ static void balloon_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
case CMSG_MEM_REQUEST_SET:
{
mem_request_t *req = (mem_request_t *)&msg->msg[0];
- if ( msg->length != sizeof(mem_request_t) )
- goto parse_error;
set_new_target(req->target);
req->status = 0;
}
break;
+
default:
- goto parse_error;
+ msg->length = 0;
+ break;
}
ctrl_if_send_response(msg);
- return;
-
- parse_error:
- msg->length = 0;
- ctrl_if_send_response(msg);
}
static int balloon_write(struct file *file, const char __user *buffer,
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c b/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c
index dc15b20180..40cd170e1d 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c
@@ -8,9 +8,14 @@
* arch/xen/drivers/blkif/frontend
*
* Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Copyright (c) 2005, Christopher Clark
*/
#include "common.h"
+#include <asm-xen/evtchn.h>
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+#include <asm-xen/xen-public/grant_table.h>
+#endif
/*
* These are rather arbitrary. They are fairly large because adjacent requests
@@ -25,13 +30,11 @@
#define BATCH_PER_DOMAIN 16
static unsigned long mmap_vstart;
-#define MMAP_PAGES_PER_REQUEST \
- (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
-#define MMAP_PAGES \
- (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
-#define MMAP_VADDR(_req,_seg) \
- (mmap_vstart + \
- ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
+#define MMAP_PAGES \
+ (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
+#define MMAP_VADDR(_req,_seg) \
+ (mmap_vstart + \
+ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
((_seg) * PAGE_SIZE))
/*
@@ -81,6 +84,29 @@ static inline void flush_plugged_queue(void)
}
#endif
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+/* When using grant tables to map a frame for device access then the
+ * handle returned must be used to unmap the frame. This is needed to
+ * drop the ref count on the frame.
+ */
+static u16 pending_grant_handles[MMAP_PAGES];
+#define pending_handle(_idx, _i) \
+ (pending_grant_handles[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
+#define BLKBACK_INVALID_HANDLE (0xFFFF)
+#endif
+
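+/*
+ * Example: pending_handle(2, 3) names the grant handle for segment 3
+ * of request slot 2, i.e. element
+ * 2*BLKIF_MAX_SEGMENTS_PER_REQUEST + 3 of pending_grant_handles[].
+ */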
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
+/*
+ * If the tap driver is used, we may get pages belonging to either the tap
+ * or (more likely) the real frontend. The backend must specify which domain
+ * a given page belongs to in update_va_mapping though. For the moment,
+ * the tap rewrites the ID field of the request to contain the request index
+ * and the id of the real front end domain.
+ */
+#define BLKTAP_COOKIE 0xbeadfeed
+static inline domid_t ID_TO_DOM(unsigned long id) { return (id >> 16); }
+#endif
+
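+/*
+ * Sketch of the encoding described above: the tap packs the real
+ * frontend domain into the upper 16 bits of the request id, so
+ * ID_TO_DOM((dom << 16) | idx) recovers 'dom' and (id & 0xffff) the
+ * per-request index.
+ */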
static int do_block_io_op(blkif_t *blkif, int max_to_do);
static void dispatch_probe(blkif_t *blkif, blkif_request_t *req);
static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
@@ -89,20 +115,42 @@ static void make_response(blkif_t *blkif, unsigned long id,
static void fast_flush_area(int idx, int nr_pages)
{
- multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ gnttab_op_t aop[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ unsigned int i, invcount = 0;
+ u16 handle;
+
+ for ( i = 0; i < nr_pages; i++ )
+ {
+ if ( BLKBACK_INVALID_HANDLE != ( handle = pending_handle(idx, i) ) )
+ {
+ aop[i].u.unmap_grant_ref.host_virt_addr = MMAP_VADDR(idx, i);
+ aop[i].u.unmap_grant_ref.dev_bus_addr = 0;
+ aop[i].u.unmap_grant_ref.handle = handle;
+ pending_handle(idx, i) = BLKBACK_INVALID_HANDLE;
+ invcount++;
+ }
+ }
+ if ( unlikely(HYPERVISOR_grant_table_op(
+ GNTTABOP_unmap_grant_ref, aop, invcount)))
+ BUG();
+#else
+
+ multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST];
int i;
for ( i = 0; i < nr_pages; i++ )
{
mcl[i].op = __HYPERVISOR_update_va_mapping;
- mcl[i].args[0] = MMAP_VADDR(idx, i) >> PAGE_SHIFT;
+ mcl[i].args[0] = MMAP_VADDR(idx, i);
mcl[i].args[1] = 0;
mcl[i].args[2] = 0;
}
- mcl[nr_pages-1].args[2] = UVMF_FLUSH_TLB;
+ mcl[nr_pages-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
BUG();
+#endif
}
@@ -281,17 +329,16 @@ irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
static int do_block_io_op(blkif_t *blkif, int max_to_do)
{
- blkif_ring_t *blk_ring = blkif->blk_ring_base;
+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
blkif_request_t *req;
- BLKIF_RING_IDX i, rp;
+ RING_IDX i, rp;
int more_to_do = 0;
- rp = blk_ring->req_prod;
+ rp = blk_ring->sring->req_prod;
rmb(); /* Ensure we see queued requests up to 'rp'. */
- /* Take items off the comms ring, taking care not to overflow. */
- for ( i = blkif->blk_req_cons;
- (i != rp) && ((i-blkif->blk_resp_prod) != BLKIF_RING_SIZE);
+ for ( i = blk_ring->req_cons;
+ (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
i++ )
{
if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
@@ -300,7 +347,7 @@ static int do_block_io_op(blkif_t *blkif, int max_to_do)
break;
}
- req = &blk_ring->ring[MASK_BLKIF_IDX(i)].req;
+ req = RING_GET_REQUEST(blk_ring, i);
switch ( req->operation )
{
case BLKIF_OP_READ:
@@ -314,14 +361,13 @@ static int do_block_io_op(blkif_t *blkif, int max_to_do)
default:
DPRINTK("error: unknown block io operation [%d]\n",
- blk_ring->ring[i].req.operation);
- make_response(blkif, blk_ring->ring[i].req.id,
- blk_ring->ring[i].req.operation, BLKIF_RSP_ERROR);
+ req->operation);
+ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
break;
}
}
- blkif->blk_req_cons = i;
+ blk_ring->req_cons = i;
return more_to_do;
}
@@ -339,12 +385,50 @@ static void dispatch_probe(blkif_t *blkif, blkif_request_t *req)
(blkif_last_sect(req->frame_and_sects[0]) != 7) )
goto out;
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ {
+ gnttab_op_t op;
+
+ op.u.map_grant_ref.host_virt_addr = MMAP_VADDR(pending_idx, 0);
+ op.u.map_grant_ref.flags = GNTMAP_host_map;
+ op.u.map_grant_ref.ref = blkif_gref_from_fas(req->frame_and_sects[0]);
+ op.u.map_grant_ref.dom = blkif->domid;
+
+ if ( unlikely(HYPERVISOR_grant_table_op(
+ GNTTABOP_map_grant_ref, &op, 1)))
+ BUG();
+
+ if ( op.u.map_grant_ref.handle < 0 )
+ goto out;
+
+ pending_handle(pending_idx, 0) = op.u.map_grant_ref.handle;
+ }
+#else /* else CONFIG_XEN_BLKDEV_GRANT */
+
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
+ /* Grab the real frontend out of the probe message. */
+ if (req->frame_and_sects[1] == BLKTAP_COOKIE)
+ blkif->is_blktap = 1;
+#endif
+
+
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
if ( HYPERVISOR_update_va_mapping_otherdomain(
- MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
+ MMAP_VADDR(pending_idx, 0),
(pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
- 0, blkif->domid) )
+ 0, (blkif->is_blktap ? ID_TO_DOM(req->id) : blkif->domid) ) )
+
goto out;
-
+#else
+ if ( HYPERVISOR_update_va_mapping_otherdomain(
+ MMAP_VADDR(pending_idx, 0),
+ (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
+ 0, blkif->domid) )
+
+ goto out;
+#endif
+#endif /* endif CONFIG_XEN_BLKDEV_GRANT */
+
rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0),
PAGE_SIZE / sizeof(vdisk_t));
@@ -357,113 +441,152 @@ static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
{
extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
- short nr_sects;
- unsigned long buffer, fas;
- int i, tot_sects, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+ unsigned long fas = 0;
+ int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
pending_req_t *pending_req;
- unsigned long remap_prot;
- multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
-
- /* We map virtual scatter/gather segments to physical segments. */
- int new_segs, nr_psegs = 0;
- phys_seg_t phys_seg[BLKIF_MAX_SEGMENTS_PER_REQUEST + 1];
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ gnttab_op_t aop[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+#else
+ unsigned long remap_prot;
+ multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+#endif
+ struct phys_req preq;
+ struct {
+ unsigned long buf; unsigned int nsec;
+ } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ unsigned int nseg;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ struct buffer_head *bh;
+#else
+ struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ int nbio = 0;
+ request_queue_t *q;
+#endif
/* Check that number of segments is sane. */
- if ( unlikely(req->nr_segments == 0) ||
- unlikely(req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
+ nseg = req->nr_segments;
+ if ( unlikely(nseg == 0) ||
+ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
{
- DPRINTK("Bad number of segments in request (%d)\n", req->nr_segments);
+ DPRINTK("Bad number of segments in request (%d)\n", nseg);
goto bad_descriptor;
}
- /*
- * Check each address/size pair is sane, and convert into a
- * physical device and block offset. Note that if the offset and size
- * crosses a virtual extent boundary, we may end up with more
- * physical scatter/gather segments than virtual segments.
- */
- for ( i = tot_sects = 0; i < req->nr_segments; i++, tot_sects += nr_sects )
+ preq.dev = req->device;
+ preq.sector_number = req->sector_number;
+ preq.nr_sects = 0;
+
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ for ( i = 0; i < nseg; i++ )
{
- fas = req->frame_and_sects[i];
- buffer = (fas & PAGE_MASK) | (blkif_first_sect(fas) << 9);
- nr_sects = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
+ fas = req->frame_and_sects[i];
+ seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
- if ( nr_sects <= 0 )
+ if ( seg[i].nsec <= 0 )
goto bad_descriptor;
+ preq.nr_sects += seg[i].nsec;
+
+ aop[i].u.map_grant_ref.host_virt_addr = MMAP_VADDR(pending_idx, i);
+ aop[i].u.map_grant_ref.dom = blkif->domid;
+ aop[i].u.map_grant_ref.ref = blkif_gref_from_fas(fas);
+ aop[i].u.map_grant_ref.flags = GNTMAP_host_map;
+ if ( operation == WRITE )
+ aop[i].u.map_grant_ref.flags |= GNTMAP_readonly;
+ }
+
+ if ( unlikely(HYPERVISOR_grant_table_op(
+ GNTTABOP_map_grant_ref, aop, nseg)))
+ BUG();
- phys_seg[nr_psegs].dev = req->device;
- phys_seg[nr_psegs].sector_number = req->sector_number + tot_sects;
- phys_seg[nr_psegs].buffer = buffer;
- phys_seg[nr_psegs].nr_sects = nr_sects;
-
- /* Translate the request into the relevant 'physical device' */
- new_segs = vbd_translate(&phys_seg[nr_psegs], blkif, operation);
- if ( new_segs < 0 )
- {
- DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
- operation == READ ? "read" : "write",
- req->sector_number + tot_sects,
- req->sector_number + tot_sects + nr_sects,
- req->device);
+ for ( i = 0; i < nseg; i++ )
+ {
+ if ( unlikely(aop[i].u.map_grant_ref.handle < 0) )
+ {
+ DPRINTK("invalid buffer -- could not remap it\n");
+ fast_flush_area(pending_idx, nseg);
goto bad_descriptor;
}
-
- nr_psegs += new_segs;
- ASSERT(nr_psegs <= (BLKIF_MAX_SEGMENTS_PER_REQUEST+1));
+
+ phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+ FOREIGN_FRAME(aop[i].u.map_grant_ref.dev_bus_addr);
+
+ pending_handle(pending_idx, i) = aop[i].u.map_grant_ref.handle;
+ }
+#endif
+
+ for ( i = 0; i < nseg; i++ )
+ {
+ fas = req->frame_and_sects[i];
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ seg[i].buf = (aop[i].u.map_grant_ref.dev_bus_addr << PAGE_SHIFT) |
+ (blkif_first_sect(fas) << 9);
+#else
+ seg[i].buf = (fas & PAGE_MASK) | (blkif_first_sect(fas) << 9);
+ seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
+ if ( seg[i].nsec <= 0 )
+ goto bad_descriptor;
+ preq.nr_sects += seg[i].nsec;
+#endif
}
- /* Nonsensical zero-sized request? */
- if ( unlikely(nr_psegs == 0) )
+ if ( vbd_translate(&preq, blkif, operation) != 0 )
+ {
+ DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
+ operation == READ ? "read" : "write", preq.sector_number,
+ preq.sector_number + preq.nr_sects, preq.dev);
goto bad_descriptor;
+ }
+#ifndef CONFIG_XEN_BLKDEV_GRANT
if ( operation == READ )
remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
else
remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED;
- for ( i = 0; i < nr_psegs; i++ )
+ for ( i = 0; i < nseg; i++ )
{
mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
- mcl[i].args[0] = MMAP_VADDR(pending_idx, i) >> PAGE_SHIFT;
- mcl[i].args[1] = (phys_seg[i].buffer & PAGE_MASK) | remap_prot;
+ mcl[i].args[0] = MMAP_VADDR(pending_idx, i);
+ mcl[i].args[1] = (seg[i].buf & PAGE_MASK) | remap_prot;
mcl[i].args[2] = 0;
mcl[i].args[3] = blkif->domid;
-
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
+ if ( blkif->is_blktap )
+ mcl[i].args[3] = ID_TO_DOM(req->id);
+#endif
phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
- FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
+ FOREIGN_FRAME(seg[i].buf >> PAGE_SHIFT);
}
- if ( unlikely(HYPERVISOR_multicall(mcl, nr_psegs) != 0) )
- BUG();
+ BUG_ON(HYPERVISOR_multicall(mcl, nseg) != 0);
- for ( i = 0; i < nr_psegs; i++ )
+ for ( i = 0; i < nseg; i++ )
{
if ( unlikely(mcl[i].args[5] != 0) )
{
DPRINTK("invalid buffer -- could not remap it\n");
- fast_flush_area(pending_idx, nr_psegs);
+ fast_flush_area(pending_idx, nseg);
goto bad_descriptor;
}
}
+#endif /* end ifndef CONFIG_XEN_BLKDEV_GRANT */
pending_req = &pending_reqs[pending_idx];
pending_req->blkif = blkif;
pending_req->id = req->id;
pending_req->operation = operation;
pending_req->status = BLKIF_RSP_OKAY;
- pending_req->nr_pages = nr_psegs;
- atomic_set(&pending_req->pendcnt, nr_psegs);
- pending_cons++;
+ pending_req->nr_pages = nseg;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ atomic_set(&pending_req->pendcnt, nseg);
+ pending_cons++;
blkif_get(blkif);
- /* Now we pass each segment down to the real blkdev layer. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
- for ( i = 0; i < nr_psegs; i++ )
+ for ( i = 0; i < nseg; i++ )
{
- struct buffer_head *bh;
-
- bh = kmem_cache_alloc(buffer_head_cachep, GFP_ATOMIC);
+ bh = kmem_cache_alloc(buffer_head_cachep, GFP_KERNEL);
if ( unlikely(bh == NULL) )
{
__end_block_io_op(pending_req, 0);
@@ -473,12 +596,12 @@ static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
memset(bh, 0, sizeof (struct buffer_head));
init_waitqueue_head(&bh->b_wait);
- bh->b_size = phys_seg[i].nr_sects << 9;
- bh->b_dev = phys_seg[i].dev;
- bh->b_rdev = phys_seg[i].dev;
- bh->b_rsector = (unsigned long)phys_seg[i].sector_number;
+ bh->b_size = seg[i].nsec << 9;
+ bh->b_dev = preq.dev;
+ bh->b_rdev = preq.dev;
+ bh->b_rsector = (unsigned long)preq.sector_number;
bh->b_data = (char *)MMAP_VADDR(pending_idx, i) +
- (phys_seg[i].buffer & ~PAGE_MASK);
+ (seg[i].buf & ~PAGE_MASK);
bh->b_page = virt_to_page(MMAP_VADDR(pending_idx, i));
bh->b_end_io = end_block_io_op;
bh->b_private = pending_req;
@@ -492,40 +615,52 @@ static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
/* Dispatch a single request. We'll flush it to disc later. */
generic_make_request(operation, bh);
+
+ preq.sector_number += seg[i].nsec;
}
+
#else
- for ( i = 0; i < nr_psegs; i++ )
- {
- struct bio *bio;
- request_queue_t *q;
- bio = bio_alloc(GFP_ATOMIC, 1);
- if ( unlikely(bio == NULL) )
+ for ( i = 0; i < nseg; i++ )
+ {
+ while ( (bio == NULL) ||
+ (bio_add_page(bio,
+ virt_to_page(MMAP_VADDR(pending_idx, i)),
+ seg[i].nsec << 9,
+ seg[i].buf & ~PAGE_MASK) == 0) )
{
- __end_block_io_op(pending_req, 0);
- continue;
+ bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
+ if ( unlikely(bio == NULL) )
+ {
+ for ( i = 0; i < (nbio-1); i++ )
+ bio_put(biolist[i]);
+ fast_flush_area(pending_idx, nseg);
+ goto bad_descriptor;
+ }
+
+ bio->bi_bdev = preq.bdev;
+ bio->bi_private = pending_req;
+ bio->bi_end_io = end_block_io_op;
+ bio->bi_sector = preq.sector_number;
}
- bio->bi_bdev = phys_seg[i].bdev;
- bio->bi_private = pending_req;
- bio->bi_end_io = end_block_io_op;
- bio->bi_sector = phys_seg[i].sector_number;
+ preq.sector_number += seg[i].nsec;
+ }
- bio_add_page(
- bio,
- virt_to_page(MMAP_VADDR(pending_idx, i)),
- phys_seg[i].nr_sects << 9,
- phys_seg[i].buffer & ~PAGE_MASK);
+ if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
+ {
+ flush_plugged_queue();
+ blk_get_queue(q);
+ plugged_queue = q;
+ }
- if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
- {
- flush_plugged_queue();
- blk_get_queue(q);
- plugged_queue = q;
- }
+ atomic_set(&pending_req->pendcnt, nbio);
+ pending_cons++;
+ blkif_get(blkif);
+
+ for ( i = 0; i < nbio; i++ )
+ submit_bio(operation, biolist[i]);
- submit_bio(operation, bio);
- }
#endif
return;
@@ -546,16 +681,17 @@ static void make_response(blkif_t *blkif, unsigned long id,
{
blkif_response_t *resp;
unsigned long flags;
+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
/* Place on the response ring for the relevant domain. */
spin_lock_irqsave(&blkif->blk_ring_lock, flags);
- resp = &blkif->blk_ring_base->
- ring[MASK_BLKIF_IDX(blkif->blk_resp_prod)].resp;
+ resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
resp->id = id;
resp->operation = op;
resp->status = st;
wmb(); /* Ensure other side can see the response fields. */
- blkif->blk_ring_base->resp_prod = ++blkif->blk_resp_prod;
+ blk_ring->rsp_prod_pvt++;
+ RING_PUSH_RESPONSES(blk_ring);
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
/* Kick the relevant domain. */
@@ -599,6 +735,15 @@ static int __init blkif_init(void)
#endif
blkif_ctrlif_init();
+
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ /* 0xff bytes make every u16 handle BLKBACK_INVALID_HANDLE (0xFFFF) */
+ memset(pending_grant_handles, 0xff, sizeof(pending_grant_handles));
+ printk(KERN_ALERT "Blkif backend is using grant tables.\n");
+#endif
+
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
+ printk(KERN_ALERT "NOTE: Blkif backend is running with tap support on!\n");
+#endif
return 0;
}
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h b/linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h
index 4a12ca8fe9..a698e01c64 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h
@@ -15,6 +15,7 @@
#include <asm-xen/ctrl_if.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/xen-public/io/blkif.h>
+#include <asm-xen/xen-public/io/ring.h>
#if 0
#define ASSERT(_p) \
@@ -36,19 +37,17 @@ struct block_device;
typedef struct blkif_st {
/* Unique identifier for this interface. */
- domid_t domid;
- unsigned int handle;
+ domid_t domid;
+ unsigned int handle;
/* Physical parameters of the comms window. */
- unsigned long shmem_frame;
- unsigned int evtchn;
- int irq;
+ unsigned long shmem_frame;
+ unsigned int evtchn;
+ int irq;
/* Comms information. */
- blkif_ring_t *blk_ring_base; /* ioremap()'ed ptr to shmem_frame. */
- BLKIF_RING_IDX blk_req_cons; /* Request consumer. */
- BLKIF_RING_IDX blk_resp_prod; /* Private version of resp. producer. */
+ blkif_back_ring_t blk_ring;
/* VBDs attached to this interface. */
- rb_root_t vbd_rb; /* Mapping from 16-bit vdevices to VBDs. */
- spinlock_t vbd_lock; /* Protects VBD mapping. */
+ rb_root_t vbd_rb; /* Mapping from 16-bit vdevices to VBDs.*/
+ spinlock_t vbd_lock; /* Protects VBD mapping. */
/* Private fields. */
enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
/*
@@ -56,6 +55,10 @@ typedef struct blkif_st {
* We therefore need to store the id from the original request.
*/
u8 disconnect_rspid;
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
+ /* Is this a blktap frontend */
+ unsigned int is_blktap;
+#endif
struct blkif_st *hash_next;
struct list_head blkdev_list;
spinlock_t blk_ring_lock;
@@ -77,38 +80,19 @@ blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle);
blkif_disconnect_complete(_b); \
} while (0)
-/* An entry in a list of xen_extents. */
-typedef struct _blkif_extent_le {
- blkif_extent_t extent; /* an individual extent */
- struct _blkif_extent_le *next; /* and a pointer to the next */
- struct block_device *bdev;
-} blkif_extent_le_t;
-
-typedef struct _vbd {
- blkif_vdev_t vdevice; /* what the domain refers to this vbd as */
- unsigned char readonly; /* Non-zero -> read-only */
- unsigned char type; /* VDISK_TYPE_xxx */
- blkif_extent_le_t *extents; /* list of xen_extents making up this vbd */
- rb_node_t rb; /* for linking into R-B tree lookup struct */
-} vbd_t;
-
void vbd_create(blkif_be_vbd_create_t *create);
-void vbd_grow(blkif_be_vbd_grow_t *grow);
-void vbd_shrink(blkif_be_vbd_shrink_t *shrink);
void vbd_destroy(blkif_be_vbd_destroy_t *delete);
int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds);
void destroy_all_vbds(blkif_t *blkif);
-/* Describes a [partial] disk extent (part of a block io request) */
-typedef struct {
+struct phys_req {
unsigned short dev;
unsigned short nr_sects;
struct block_device *bdev;
- unsigned long buffer;
blkif_sector_t sector_number;
-} phys_seg_t;
+};
-int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation);
+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
void blkif_interface_init(void);
void blkif_ctrlif_init(void);
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blkback/control.c b/linux-2.6.11-xen-sparse/drivers/xen/blkback/control.c
index b55bae7b1f..cedfcf7565 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/control.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkback/control.c
@@ -15,58 +15,32 @@ static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
switch ( msg->subtype )
{
case CMSG_BLKIF_BE_CREATE:
- if ( msg->length != sizeof(blkif_be_create_t) )
- goto parse_error;
blkif_create((blkif_be_create_t *)&msg->msg[0]);
break;
case CMSG_BLKIF_BE_DESTROY:
- if ( msg->length != sizeof(blkif_be_destroy_t) )
- goto parse_error;
blkif_destroy((blkif_be_destroy_t *)&msg->msg[0]);
break;
case CMSG_BLKIF_BE_CONNECT:
- if ( msg->length != sizeof(blkif_be_connect_t) )
- goto parse_error;
blkif_connect((blkif_be_connect_t *)&msg->msg[0]);
break;
case CMSG_BLKIF_BE_DISCONNECT:
- if ( msg->length != sizeof(blkif_be_disconnect_t) )
- goto parse_error;
if ( !blkif_disconnect((blkif_be_disconnect_t *)&msg->msg[0],msg->id) )
return; /* Sending the response is deferred until later. */
break;
case CMSG_BLKIF_BE_VBD_CREATE:
- if ( msg->length != sizeof(blkif_be_vbd_create_t) )
- goto parse_error;
vbd_create((blkif_be_vbd_create_t *)&msg->msg[0]);
break;
case CMSG_BLKIF_BE_VBD_DESTROY:
- if ( msg->length != sizeof(blkif_be_vbd_destroy_t) )
- goto parse_error;
vbd_destroy((blkif_be_vbd_destroy_t *)&msg->msg[0]);
break;
- case CMSG_BLKIF_BE_VBD_GROW:
- if ( msg->length != sizeof(blkif_be_vbd_grow_t) )
- goto parse_error;
- vbd_grow((blkif_be_vbd_grow_t *)&msg->msg[0]);
- break;
- case CMSG_BLKIF_BE_VBD_SHRINK:
- if ( msg->length != sizeof(blkif_be_vbd_shrink_t) )
- goto parse_error;
- vbd_shrink((blkif_be_vbd_shrink_t *)&msg->msg[0]);
- break;
default:
- goto parse_error;
+ DPRINTK("Parse error while reading message subtype %d, len %d\n",
+ msg->subtype, msg->length);
+ msg->length = 0;
+ break;
}
ctrl_if_send_response(msg);
- return;
-
- parse_error:
- DPRINTK("Parse error while reading message subtype %d, len %d\n",
- msg->subtype, msg->length);
- msg->length = 0;
- ctrl_if_send_response(msg);
}
void blkif_ctrlif_init(void)
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blkback/interface.c b/linux-2.6.11-xen-sparse/drivers/xen/blkback/interface.c
index 4196014597..46d55d1fd4 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/interface.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkback/interface.c
@@ -39,7 +39,7 @@ static void __blkif_disconnect_complete(void *arg)
* must still be notified to the remote driver.
*/
unbind_evtchn_from_irq(blkif->evtchn);
- vfree(blkif->blk_ring_base);
+ vfree(blkif->blk_ring.sring);
/* Construct the deferred response message. */
cmsg.type = CMSG_BLKIF_BE;
@@ -149,14 +149,15 @@ void blkif_destroy(blkif_be_destroy_t *destroy)
void blkif_connect(blkif_be_connect_t *connect)
{
- domid_t domid = connect->domid;
- unsigned int handle = connect->blkif_handle;
- unsigned int evtchn = connect->evtchn;
- unsigned long shmem_frame = connect->shmem_frame;
+ domid_t domid = connect->domid;
+ unsigned int handle = connect->blkif_handle;
+ unsigned int evtchn = connect->evtchn;
+ unsigned long shmem_frame = connect->shmem_frame;
struct vm_struct *vma;
- pgprot_t prot;
- int error;
- blkif_t *blkif;
+ pgprot_t prot;
+ int error;
+ blkif_t *blkif;
+ blkif_sring_t *sring;
blkif = blkif_find_by_handle(domid, handle);
if ( unlikely(blkif == NULL) )
@@ -195,11 +196,13 @@ void blkif_connect(blkif_be_connect_t *connect)
vfree(vma->addr);
return;
}
-
+ sring = (blkif_sring_t *)vma->addr;
+ SHARED_RING_INIT(sring);
+ BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+
blkif->evtchn = evtchn;
blkif->irq = bind_evtchn_to_irq(evtchn);
blkif->shmem_frame = shmem_frame;
- blkif->blk_ring_base = (blkif_ring_t *)vma->addr;
blkif->status = CONNECTED;
blkif_get(blkif);
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c b/linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c
index 493a2a7268..f5ee589ee3 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c
@@ -7,21 +7,34 @@
* in vbd_translate. All other lookups are implicitly protected because the
* only caller (the control message dispatch routine) serializes the calls.
*
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
*/
#include "common.h"
+struct vbd {
+ blkif_vdev_t vdevice; /* what the domain refers to this vbd as */
+ unsigned char readonly; /* Non-zero -> read-only */
+ unsigned char type; /* VDISK_TYPE_xxx */
+ blkif_pdev_t pdevice; /* phys device that this vbd maps to */
+ struct block_device *bdev;
+ rb_node_t rb; /* for linking into R-B tree lookup struct */
+};
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static dev_t vbd_map_devnum(blkif_pdev_t);
+static inline dev_t vbd_map_devnum(blkif_pdev_t cookie)
+{ return MKDEV(cookie>>8, cookie&0xff); }
+#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
+ (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
#define bdev_put(_b) blkdev_put(_b)
#else
+#define vbd_sz(_v) (blk_size[MAJOR((_v)->pdevice)][MINOR((_v)->pdevice)]*2)
#define bdev_put(_b) ((void)0)
#endif
void vbd_create(blkif_be_vbd_create_t *create)
{
- vbd_t *vbd;
+ struct vbd *vbd;
rb_node_t **rb_p, *rb_parent = NULL;
blkif_t *blkif;
blkif_vdev_t vdevice = create->vdevice;
@@ -39,7 +52,7 @@ void vbd_create(blkif_be_vbd_create_t *create)
while ( *rb_p != NULL )
{
rb_parent = *rb_p;
- vbd = rb_entry(rb_parent, vbd_t, rb);
+ vbd = rb_entry(rb_parent, struct vbd, rb);
if ( vdevice < vbd->vdevice )
{
rb_p = &rb_parent->rb_left;
@@ -56,7 +69,7 @@ void vbd_create(blkif_be_vbd_create_t *create)
}
}
- if ( unlikely((vbd = kmalloc(sizeof(vbd_t), GFP_KERNEL)) == NULL) )
+ if ( unlikely((vbd = kmalloc(sizeof(struct vbd), GFP_KERNEL)) == NULL) )
{
DPRINTK("vbd_create: out of memory\n");
create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
@@ -65,216 +78,54 @@ void vbd_create(blkif_be_vbd_create_t *create)
vbd->vdevice = vdevice;
vbd->readonly = create->readonly;
- vbd->type = VDISK_TYPE_DISK;
- vbd->extents = NULL;
-
- spin_lock(&blkif->vbd_lock);
- rb_link_node(&vbd->rb, rb_parent, rb_p);
- rb_insert_color(&vbd->rb, &blkif->vbd_rb);
- spin_unlock(&blkif->vbd_lock);
-
- DPRINTK("Successful creation of vdev=%04x (dom=%u)\n",
- vdevice, create->domid);
- create->status = BLKIF_BE_STATUS_OKAY;
-}
-
-
-/* Grow a VBD by appending a new extent. Fails if the VBD doesn't exist. */
-void vbd_grow(blkif_be_vbd_grow_t *grow)
-{
- blkif_t *blkif;
- blkif_extent_le_t **px, *x;
- vbd_t *vbd = NULL;
- rb_node_t *rb;
- blkif_vdev_t vdevice = grow->vdevice;
- unsigned long sz;
-
- blkif = blkif_find_by_handle(grow->domid, grow->blkif_handle);
- if ( unlikely(blkif == NULL) )
- {
- DPRINTK("vbd_grow attempted for non-existent blkif (%u,%u)\n",
- grow->domid, grow->blkif_handle);
- grow->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return;
- }
-
- rb = blkif->vbd_rb.rb_node;
- while ( rb != NULL )
- {
- vbd = rb_entry(rb, vbd_t, rb);
- if ( vdevice < vbd->vdevice )
- rb = rb->rb_left;
- else if ( vdevice > vbd->vdevice )
- rb = rb->rb_right;
- else
- break;
- }
-
- if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
- {
- DPRINTK("vbd_grow: attempted to append extent to non-existent VBD.\n");
- grow->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
- return;
- }
-
- if ( grow->extent.sector_start > 0 )
- {
- DPRINTK("vbd_grow: dev %08x start not zero.\n", grow->extent.device);
- grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- return;
- }
-
- if ( unlikely((x = kmalloc(sizeof(blkif_extent_le_t),
- GFP_KERNEL)) == NULL) )
- {
- DPRINTK("vbd_grow: out of memory\n");
- grow->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
- return;
- }
+ vbd->type = VDISK_TYPE_DISK | VDISK_FLAG_VIRT;
/* Mask to 16-bit for compatibility with old tools */
- x->extent.device = grow->extent.device & 0xffff;
- x->extent.sector_start = grow->extent.sector_start;
- x->extent.sector_length = grow->extent.sector_length;
- x->next = (blkif_extent_le_t *)NULL;
+ vbd->pdevice = create->pdevice & 0xffff;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- x->bdev = open_by_devnum(vbd_map_devnum(x->extent.device),
- vbd->readonly ? FMODE_READ : FMODE_WRITE);
- if ( IS_ERR(x->bdev) )
- {
- DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
- grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- goto out;
- }
- /* XXXcl maybe bd_claim? */
-
- if ( (x->bdev->bd_disk == NULL) )
- {
- DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
- grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- bdev_put(x->bdev);
- goto out;
- }
-
- /* get size in sectors */
- if ( x->bdev->bd_part )
- sz = x->bdev->bd_part->nr_sects;
- else
- sz = x->bdev->bd_disk->capacity;
-
- vbd->type = (x->bdev->bd_disk->flags & GENHD_FL_CD) ?
- VDISK_TYPE_CDROM : VDISK_TYPE_DISK;
-
-#else
- if( !blk_size[MAJOR(x->extent.device)] )
- {
- DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
- grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- goto out;
- }
-
- /* convert blocks (1KB) to sectors */
- sz = blk_size[MAJOR(x->extent.device)][MINOR(x->extent.device)] * 2;
-
- if ( sz == 0 )
- {
- DPRINTK("vbd_grow: device %08x zero size!\n", x->extent.device);
- grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- goto out;
- }
-#endif
-
- /*
- * NB. This test assumes sector_start == 0, which is always the case
- * in Xen 1.3. In fact the whole grow/shrink interface could do with
- * some simplification.
- */
- if ( x->extent.sector_length > sz )
- x->extent.sector_length = sz;
-
- DPRINTK("vbd_grow: requested_len %llu actual_len %lu\n",
- x->extent.sector_length, sz);
-
- for ( px = &vbd->extents; *px != NULL; px = &(*px)->next )
- continue;
-
- *px = x; /* ATOMIC: no need for vbd_lock. */
-
- DPRINTK("Successful grow of vdev=%04x (dom=%u)\n",
- vdevice, grow->domid);
-
- grow->status = BLKIF_BE_STATUS_OKAY;
- return;
-
- out:
- kfree(x);
-}
-
-
-void vbd_shrink(blkif_be_vbd_shrink_t *shrink)
-{
- blkif_t *blkif;
- blkif_extent_le_t **px, *x;
- vbd_t *vbd = NULL;
- rb_node_t *rb;
- blkif_vdev_t vdevice = shrink->vdevice;
-
- blkif = blkif_find_by_handle(shrink->domid, shrink->blkif_handle);
- if ( unlikely(blkif == NULL) )
+ vbd->bdev = open_by_devnum(
+ vbd_map_devnum(vbd->pdevice),
+ vbd->readonly ? FMODE_READ : FMODE_WRITE);
+ if ( IS_ERR(vbd->bdev) )
{
- DPRINTK("vbd_shrink attempted for non-existent blkif (%u,%u)\n",
- shrink->domid, shrink->blkif_handle);
- shrink->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
+ create->status = BLKIF_BE_STATUS_PHYSDEV_NOT_FOUND;
return;
}
- rb = blkif->vbd_rb.rb_node;
- while ( rb != NULL )
+ if ( (vbd->bdev->bd_disk == NULL) )
{
- vbd = rb_entry(rb, vbd_t, rb);
- if ( vdevice < vbd->vdevice )
- rb = rb->rb_left;
- else if ( vdevice > vbd->vdevice )
- rb = rb->rb_right;
- else
- break;
- }
-
- if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
- {
- shrink->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
+ DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
+ create->status = BLKIF_BE_STATUS_PHYSDEV_NOT_FOUND;
+ bdev_put(vbd->bdev);
return;
}
-
- if ( unlikely(vbd->extents == NULL) )
+#else
+ if ( (blk_size[MAJOR(vbd->pdevice)] == NULL) || (vbd_sz(vbd) == 0) )
{
- shrink->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+ DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
+ create->status = BLKIF_BE_STATUS_PHYSDEV_NOT_FOUND;
return;
}
-
- /* Find the last extent. We now know that there is at least one. */
- for ( px = &vbd->extents; (*px)->next != NULL; px = &(*px)->next )
- continue;
-
- x = *px;
- *px = x->next; /* ATOMIC: no need for vbd_lock. */
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- bdev_put(x->bdev);
#endif
- kfree(x);
- shrink->status = BLKIF_BE_STATUS_OKAY;
+ spin_lock(&blkif->vbd_lock);
+ rb_link_node(&vbd->rb, rb_parent, rb_p);
+ rb_insert_color(&vbd->rb, &blkif->vbd_rb);
+ spin_unlock(&blkif->vbd_lock);
+
+ DPRINTK("Successful creation of vdev=%04x (dom=%u)\n",
+ vdevice, create->domid);
+ create->status = BLKIF_BE_STATUS_OKAY;
}
void vbd_destroy(blkif_be_vbd_destroy_t *destroy)
{
blkif_t *blkif;
- vbd_t *vbd;
+ struct vbd *vbd;
rb_node_t *rb;
- blkif_extent_le_t *x, *t;
blkif_vdev_t vdevice = destroy->vdevice;
blkif = blkif_find_by_handle(destroy->domid, destroy->blkif_handle);
@@ -289,7 +140,7 @@ void vbd_destroy(blkif_be_vbd_destroy_t *destroy)
rb = blkif->vbd_rb.rb_node;
while ( rb != NULL )
{
- vbd = rb_entry(rb, vbd_t, rb);
+ vbd = rb_entry(rb, struct vbd, rb);
if ( vdevice < vbd->vdevice )
rb = rb->rb_left;
else if ( vdevice > vbd->vdevice )
@@ -305,66 +156,38 @@ void vbd_destroy(blkif_be_vbd_destroy_t *destroy)
spin_lock(&blkif->vbd_lock);
rb_erase(rb, &blkif->vbd_rb);
spin_unlock(&blkif->vbd_lock);
-
- x = vbd->extents;
+ bdev_put(vbd->bdev);
kfree(vbd);
-
- while ( x != NULL )
- {
- t = x->next;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- bdev_put(x->bdev);
-#endif
- kfree(x);
- x = t;
- }
}
void destroy_all_vbds(blkif_t *blkif)
{
- vbd_t *vbd;
- rb_node_t *rb;
- blkif_extent_le_t *x, *t;
+ struct vbd *vbd;
+ rb_node_t *rb;
spin_lock(&blkif->vbd_lock);
while ( (rb = blkif->vbd_rb.rb_node) != NULL )
{
- vbd = rb_entry(rb, vbd_t, rb);
-
+ vbd = rb_entry(rb, struct vbd, rb);
rb_erase(rb, &blkif->vbd_rb);
- x = vbd->extents;
+ spin_unlock(&blkif->vbd_lock);
+ bdev_put(vbd->bdev);
kfree(vbd);
-
- while ( x != NULL )
- {
- t = x->next;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- bdev_put(x->bdev);
-#endif
- kfree(x);
- x = t;
- }
+ spin_lock(&blkif->vbd_lock);
}
spin_unlock(&blkif->vbd_lock);
}
-static int vbd_probe_single(blkif_t *blkif, vdisk_t *vbd_info, vbd_t *vbd)
+static void vbd_probe_single(
+ blkif_t *blkif, vdisk_t *vbd_info, struct vbd *vbd)
{
- blkif_extent_le_t *x;
-
- vbd_info->device = vbd->vdevice;
- vbd_info->info = vbd->type;
- if ( vbd->readonly )
- vbd_info->info |= VDISK_FLAG_RO;
- vbd_info->capacity = 0ULL;
- for ( x = vbd->extents; x != NULL; x = x->next )
- vbd_info->capacity += x->extent.sector_length;
-
- return 0;
+ vbd_info->device = vbd->vdevice;
+ vbd_info->info = vbd->type | (vbd->readonly ? VDISK_FLAG_RO : 0);
+ vbd_info->capacity = vbd_sz(vbd);
}
@@ -386,9 +209,8 @@ int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds)
for ( ; ; )
{
/* STEP 2. Dealt with left subtree. Now process current node. */
- if ( (rc = vbd_probe_single(blkif, &vbd_info[nr_vbds],
- rb_entry(rb, vbd_t, rb))) != 0 )
- goto out;
+ vbd_probe_single(blkif, &vbd_info[nr_vbds],
+ rb_entry(rb, struct vbd, rb));
if ( ++nr_vbds == max_vbds )
goto out;
@@ -421,13 +243,11 @@ int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds)
}
-int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation)
+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
{
- blkif_extent_le_t *x;
- vbd_t *vbd;
- rb_node_t *rb;
- blkif_sector_t sec_off;
- unsigned long nr_secs;
+ struct vbd *vbd;
+ rb_node_t *rb;
+ int rc = -EACCES;
/* Take the vbd_lock because another thread could be updating the tree. */
spin_lock(&blkif->vbd_lock);
@@ -435,10 +255,10 @@ int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation)
rb = blkif->vbd_rb.rb_node;
while ( rb != NULL )
{
- vbd = rb_entry(rb, vbd_t, rb);
- if ( pseg->dev < vbd->vdevice )
+ vbd = rb_entry(rb, struct vbd, rb);
+ if ( req->dev < vbd->vdevice )
rb = rb->rb_left;
- else if ( pseg->dev > vbd->vdevice )
+ else if ( req->dev > vbd->vdevice )
rb = rb->rb_right;
else
goto found;
@@ -446,138 +266,22 @@ int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation)
DPRINTK("vbd_translate; domain %u attempted to access "
"non-existent VBD.\n", blkif->domid);
-
- spin_unlock(&blkif->vbd_lock);
- return -ENODEV;
+ rc = -ENODEV;
+ goto out;
found:
if ( (operation == WRITE) && vbd->readonly )
- {
- spin_unlock(&blkif->vbd_lock);
- return -EACCES;
- }
+ goto out;
- /*
- * Now iterate through the list of blkif_extents, working out which should
- * be used to perform the translation.
- */
- sec_off = pseg->sector_number;
- nr_secs = pseg->nr_sects;
- for ( x = vbd->extents; x != NULL; x = x->next )
- {
- if ( sec_off < x->extent.sector_length )
- {
- pseg->dev = x->extent.device;
- pseg->bdev = x->bdev;
- pseg->sector_number = x->extent.sector_start + sec_off;
- if ( unlikely((sec_off + nr_secs) > x->extent.sector_length) )
- goto overrun;
- spin_unlock(&blkif->vbd_lock);
- return 1;
- }
- sec_off -= x->extent.sector_length;
- }
+ if ( unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)) )
+ goto out;
- DPRINTK("vbd_translate: end of vbd.\n");
- spin_unlock(&blkif->vbd_lock);
- return -EACCES;
-
- /*
- * Here we deal with overrun onto the following extent. We don't deal with
- * overrun of more than one boundary since each request is restricted to
- * 2^9 512-byte sectors, so it should be trivial for control software to
- * ensure that extents are large enough to prevent excessive overrun.
- */
- overrun:
-
- /* Adjust length of first chunk to run to end of first extent. */
- pseg[0].nr_sects = x->extent.sector_length - sec_off;
-
- /* Set second chunk buffer and length to start where first chunk ended. */
- pseg[1].buffer = pseg[0].buffer + (pseg[0].nr_sects << 9);
- pseg[1].nr_sects = nr_secs - pseg[0].nr_sects;
-
- /* Now move to the next extent. Check it exists and is long enough! */
- if ( unlikely((x = x->next) == NULL) ||
- unlikely(x->extent.sector_length < pseg[1].nr_sects) )
- {
- DPRINTK("vbd_translate: multiple overruns or end of vbd.\n");
- spin_unlock(&blkif->vbd_lock);
- return -EACCES;
- }
+ req->dev = vbd->pdevice;
+ req->bdev = vbd->bdev;
+ rc = 0;
- /* Store the real device and start sector for the second chunk. */
- pseg[1].dev = x->extent.device;
- pseg[1].bdev = x->bdev;
- pseg[1].sector_number = x->extent.sector_start;
-
+ out:
spin_unlock(&blkif->vbd_lock);
- return 2;
-}
-
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-
-#define MAJOR_XEN(dev) ((dev)>>8)
-#define MINOR_XEN(dev) ((dev) & 0xff)
-
-#ifndef FANCY_REMAPPING
-static dev_t vbd_map_devnum(blkif_pdev_t cookie)
-{
- int major = MAJOR_XEN(cookie);
- int minor = MINOR_XEN(cookie);
-
- return MKDEV(major, minor);
-}
-#else
-#define XEN_IDE0_MAJOR IDE0_MAJOR
-#define XEN_IDE1_MAJOR IDE1_MAJOR
-#define XEN_IDE2_MAJOR IDE2_MAJOR
-#define XEN_IDE3_MAJOR IDE3_MAJOR
-#define XEN_IDE4_MAJOR IDE4_MAJOR
-#define XEN_IDE5_MAJOR IDE5_MAJOR
-#define XEN_IDE6_MAJOR IDE6_MAJOR
-#define XEN_IDE7_MAJOR IDE7_MAJOR
-#define XEN_IDE8_MAJOR IDE8_MAJOR
-#define XEN_IDE9_MAJOR IDE9_MAJOR
-#define XEN_SCSI_DISK0_MAJOR SCSI_DISK0_MAJOR
-#define XEN_SCSI_DISK1_MAJOR SCSI_DISK1_MAJOR
-#define XEN_SCSI_DISK2_MAJOR SCSI_DISK2_MAJOR
-#define XEN_SCSI_DISK3_MAJOR SCSI_DISK3_MAJOR
-#define XEN_SCSI_DISK4_MAJOR SCSI_DISK4_MAJOR
-#define XEN_SCSI_DISK5_MAJOR SCSI_DISK5_MAJOR
-#define XEN_SCSI_DISK6_MAJOR SCSI_DISK6_MAJOR
-#define XEN_SCSI_DISK7_MAJOR SCSI_DISK7_MAJOR
-#define XEN_SCSI_CDROM_MAJOR SCSI_CDROM_MAJOR
-
-static dev_t vbd_map_devnum(blkif_pdev_t cookie)
-{
- int new_major;
- int major = MAJOR_XEN(cookie);
- int minor = MINOR_XEN(cookie);
-
- switch (major) {
- case XEN_IDE0_MAJOR: new_major = IDE0_MAJOR; break;
- case XEN_IDE1_MAJOR: new_major = IDE1_MAJOR; break;
- case XEN_IDE2_MAJOR: new_major = IDE2_MAJOR; break;
- case XEN_IDE3_MAJOR: new_major = IDE3_MAJOR; break;
- case XEN_IDE4_MAJOR: new_major = IDE4_MAJOR; break;
- case XEN_IDE5_MAJOR: new_major = IDE5_MAJOR; break;
- case XEN_IDE6_MAJOR: new_major = IDE6_MAJOR; break;
- case XEN_IDE7_MAJOR: new_major = IDE7_MAJOR; break;
- case XEN_IDE8_MAJOR: new_major = IDE8_MAJOR; break;
- case XEN_IDE9_MAJOR: new_major = IDE9_MAJOR; break;
- case XEN_SCSI_DISK0_MAJOR: new_major = SCSI_DISK0_MAJOR; break;
- case XEN_SCSI_DISK1_MAJOR ... XEN_SCSI_DISK7_MAJOR:
- new_major = SCSI_DISK1_MAJOR + major - XEN_SCSI_DISK1_MAJOR;
- break;
- case XEN_SCSI_CDROM_MAJOR: new_major = SCSI_CDROM_MAJOR; break;
- default: new_major = 0; break;
- }
-
- return MKDEV(new_major, minor);
+ return rc;
}
-#endif
-
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION_CODE(2,6,0) */
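
With extent lists gone, each VBD wraps exactly one block device and vbd_translate() collapses to a permission check plus a bounds check against vbd_sz(). A condensed sketch of the new logic, using the struct vbd and phys_req fields from the patch (locking omitted):

    static int translate_sketch(struct phys_req *req, struct vbd *vbd, int op)
    {
        if ( (op == WRITE) && vbd->readonly )
            return -EACCES;                /* writes refused on a read-only VBD */
        if ( (req->sector_number + req->nr_sects) > vbd_sz(vbd) )
            return -EACCES;                /* request runs off the device */
        req->dev  = vbd->pdevice;          /* rewrite to the physical device */
        req->bdev = vbd->bdev;
        return 0;
    }
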
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c b/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c
index 9c181affab..8c30d50cfb 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c
@@ -6,6 +6,8 @@
* Copyright (c) 2003-2004, Keir Fraser & Steve Hand
* Modifications by Mark A. Williamson are (c) Intel Research Cambridge
* Copyright (c) 2004, Christian Limpach
+ * Copyright (c) 2004, Andrew Warfield
+ * Copyright (c) 2005, Christopher Clark
*
* This file may be distributed separately from the Linux kernel, or
* incorporated into other software packages, subject to the following license:
@@ -29,6 +31,14 @@
* IN THE SOFTWARE.
*/
+#if 1
+#define ASSERT(_p) \
+ if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+ __LINE__, __FILE__); *(int*)0=0; }
+#else
+#define ASSERT(_p)
+#endif
+
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
@@ -44,6 +54,11 @@
#include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <asm-xen/ctrl_if.h>
+#include <asm-xen/evtchn.h>
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+#include <asm-xen/xen-public/grant_table.h>
+#include <asm-xen/gnttab.h>
+#endif
typedef unsigned char byte; /* from linux/ide.h */
@@ -68,44 +83,47 @@ static unsigned int blkif_irq = 0;
static int blkif_control_rsp_valid;
static blkif_response_t blkif_control_rsp;
-static blkif_ring_t *blk_ring = NULL;
-static BLKIF_RING_IDX resp_cons; /* Response consumer for comms ring. */
-static BLKIF_RING_IDX req_prod; /* Private request producer. */
+static blkif_front_ring_t blk_ring;
+
+#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
-unsigned long rec_ring_free;
-blkif_request_t rec_ring[BLKIF_RING_SIZE];
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+static domid_t rdomid = 0;
+static grant_ref_t gref_head, gref_terminal;
+#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
+ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
+#define GRANTREF_INVALID (1<<15)
+#endif
-static int recovery = 0; /* "Recovery in progress" flag. Protected
- * by the blkif_io_lock */
+static struct blk_shadow {
+ blkif_request_t req;
+ unsigned long request;
+ unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+} blk_shadow[BLK_RING_SIZE];
+unsigned long blk_shadow_free;
-/* We plug the I/O ring if the driver is suspended or if the ring is full. */
-#define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
- (blkif_state != BLKIF_STATE_CONNECTED))
+static int recovery = 0; /* Recovery in progress: protected by blkif_io_lock */
static void kick_pending_request_queues(void);
int __init xlblk_init(void);
-void blkif_completion( blkif_request_t *req );
+static void blkif_completion(struct blk_shadow *s);
-static inline int GET_ID_FROM_FREELIST( void )
+static inline int GET_ID_FROM_FREELIST(void)
{
- unsigned long free = rec_ring_free;
-
- if ( free > BLKIF_RING_SIZE )
- BUG();
-
- rec_ring_free = rec_ring[free].id;
-
- rec_ring[free].id = 0x0fffffee; /* debug */
-
+ unsigned long free = blk_shadow_free;
+ BUG_ON(free > BLK_RING_SIZE);
+ blk_shadow_free = blk_shadow[free].req.id;
+ blk_shadow[free].req.id = 0x0fffffee; /* debug */
return free;
}
-static inline void ADD_ID_TO_FREELIST( unsigned long id )
+static inline void ADD_ID_TO_FREELIST(unsigned long id)
{
- rec_ring[id].id = rec_ring_free;
- rec_ring_free = id;
+ blk_shadow[id].req.id = blk_shadow_free;
+ blk_shadow[id].request = 0;
+ blk_shadow_free = id;
}
@@ -119,48 +137,43 @@ static int sg_operation = -1;
#define DISABLE_SCATTERGATHER() (sg_operation = -1)
#endif
-static inline void translate_req_to_pfn(blkif_request_t *xreq,
- blkif_request_t *req)
+static inline void pickle_request(struct blk_shadow *s, blkif_request_t *r)
{
+#ifndef CONFIG_XEN_BLKDEV_GRANT
int i;
+#endif
- xreq->operation = req->operation;
- xreq->nr_segments = req->nr_segments;
- xreq->device = req->device;
- /* preserve id */
- xreq->sector_number = req->sector_number;
+ s->req = *r;
- for ( i = 0; i < req->nr_segments; i++ )
- xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
+#ifndef CONFIG_XEN_BLKDEV_GRANT
+ for ( i = 0; i < r->nr_segments; i++ )
+ s->req.frame_and_sects[i] = machine_to_phys(r->frame_and_sects[i]);
+#endif
}
-static inline void translate_req_to_mfn(blkif_request_t *xreq,
- blkif_request_t *req)
+static inline void unpickle_request(blkif_request_t *r, struct blk_shadow *s)
{
+#ifndef CONFIG_XEN_BLKDEV_GRANT
int i;
+#endif
- xreq->operation = req->operation;
- xreq->nr_segments = req->nr_segments;
- xreq->device = req->device;
- xreq->id = req->id; /* copy id (unlike above) */
- xreq->sector_number = req->sector_number;
+ *r = s->req;
- for ( i = 0; i < req->nr_segments; i++ )
- xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
+#ifndef CONFIG_XEN_BLKDEV_GRANT
+ for ( i = 0; i < s->req.nr_segments; i++ )
+ r->frame_and_sects[i] = phys_to_machine(s->req.frame_and_sects[i]);
+#endif
}
static inline void flush_requests(void)
{
DISABLE_SCATTERGATHER();
- wmb(); /* Ensure that the frontend can see the requests. */
- blk_ring->req_prod = req_prod;
+ RING_PUSH_REQUESTS(&blk_ring);
notify_via_evtchn(blkif_evtchn);
}
-
-
/************************** KERNEL VERSION 2.6 **************************/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
@@ -182,7 +195,6 @@ static void vbd_update(void)
static void kick_pending_request_queues(void)
{
-
if ( (xlbd_blk_queue != NULL) &&
test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
{
@@ -192,7 +204,6 @@ static void kick_pending_request_queues(void)
*/
xlbd_blk_queue->request_fn(xlbd_blk_queue);
}
-
}
@@ -217,9 +228,8 @@ int blkif_release(struct inode *inode, struct file *filep)
* When usage drops to zero it may allow more VBD updates to occur.
* Update of usage count is protected by a per-device semaphore.
*/
- if (--di->mi->usage == 0) {
+ if ( --di->mi->usage == 0 )
vbd_update();
- }
return 0;
}
@@ -228,14 +238,13 @@ int blkif_release(struct inode *inode, struct file *filep)
int blkif_ioctl(struct inode *inode, struct file *filep,
unsigned command, unsigned long argument)
{
- int i;
- /* struct gendisk *gd = inode->i_bdev->bd_disk; */
+ int i;
DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
command, (long)argument, inode->i_rdev);
- switch (command) {
-
+ switch ( command )
+ {
case HDIO_GETGEO:
/* return ENOSYS to use defaults */
return -ENOSYS;
@@ -255,65 +264,6 @@ int blkif_ioctl(struct inode *inode, struct file *filep,
return 0;
}
-#if 0
-/* check media change: should probably do something here in some cases :-) */
-int blkif_check(kdev_t dev)
-{
- DPRINTK("blkif_check\n");
- return 0;
-}
-
-int blkif_revalidate(kdev_t dev)
-{
- struct block_device *bd;
- struct gendisk *gd;
- xen_block_t *disk;
- unsigned long capacity;
- int i, rc = 0;
-
- if ( (bd = bdget(dev)) == NULL )
- return -EINVAL;
-
- /*
- * Update of partition info, and check of usage count, is protected
- * by the per-block-device semaphore.
- */
- down(&bd->bd_sem);
-
- if ( ((gd = get_gendisk(dev)) == NULL) ||
- ((disk = xldev_to_xldisk(dev)) == NULL) ||
- ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
- {
- rc = -EINVAL;
- goto out;
- }
-
- if ( disk->usage > 1 )
- {
- rc = -EBUSY;
- goto out;
- }
-
- /* Only reread partition table if VBDs aren't mapped to partitions. */
- if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
- {
- for ( i = gd->max_p - 1; i >= 0; i-- )
- {
- invalidate_device(dev+i, 1);
- gd->part[MINOR(dev+i)].start_sect = 0;
- gd->part[MINOR(dev+i)].nr_sects = 0;
- gd->sizes[MINOR(dev+i)] = 0;
- }
-
- grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
- }
-
- out:
- up(&bd->bd_sem);
- bdput(bd);
- return rc;
-}
-#endif
/*
* blkif_queue_request
@@ -336,14 +286,17 @@ static int blkif_queue_request(struct request *req)
int idx;
unsigned long id;
unsigned int fsect, lsect;
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ int ref;
+#endif
if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
return 1;
/* Fill out a communications ring structure. */
- ring_req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
+ ring_req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
id = GET_ID_FROM_FREELIST();
- rec_ring[id].id = (unsigned long) req;
+ blk_shadow[id].request = (unsigned long)req;
ring_req->id = id;
ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
@@ -361,15 +314,34 @@ static int blkif_queue_request(struct request *req)
buffer_ma = page_to_phys(bvec->bv_page);
fsect = bvec->bv_offset >> 9;
lsect = fsect + (bvec->bv_len >> 9) - 1;
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ /* install a grant reference. */
+ ref = gnttab_claim_grant_reference(&gref_head, gref_terminal);
+ ASSERT( ref != -ENOSPC );
+
+ gnttab_grant_foreign_access_ref(
+ ref,
+ rdomid,
+ buffer_ma >> PAGE_SHIFT,
+ rq_data_dir(req) );
+
+ blk_shadow[id].frame[ring_req->nr_segments] =
+ buffer_ma >> PAGE_SHIFT;
+
+ ring_req->frame_and_sects[ring_req->nr_segments++] =
+ (((u32) ref) << 16) | (fsect << 3) | lsect;
+
+#else
ring_req->frame_and_sects[ring_req->nr_segments++] =
buffer_ma | (fsect << 3) | lsect;
+#endif
}
}
- req_prod++;
-
+ blk_ring.req_prod_pvt++;
+
/* Keep a private copy so we can reissue requests when recovering. */
- translate_req_to_pfn(&rec_ring[id], ring_req);
+ pickle_request(&blk_shadow[id], ring_req);
return 0;
}
@@ -388,30 +360,36 @@ void do_blkif_request(request_queue_t *rq)
queued = 0;
- while ((req = elv_next_request(rq)) != NULL) {
- if (!blk_fs_request(req)) {
+ while ( (req = elv_next_request(rq)) != NULL )
+ {
+ if ( !blk_fs_request(req) )
+ {
end_request(req, 0);
continue;
}
- if ( BLKIF_RING_FULL )
+ if ( RING_FULL(&blk_ring) )
{
blk_stop_queue(rq);
break;
}
+
DPRINTK("do_blk_req %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
req, req->cmd, req->sector, req->current_nr_sectors,
req->nr_sectors, req->buffer,
rq_data_dir(req) ? "write" : "read");
+
blkdev_dequeue_request(req);
- if (blkif_queue_request(req)) {
+ if ( blkif_queue_request(req) )
+ {
blk_stop_queue(rq);
break;
}
+
queued++;
}
- if (queued != 0)
+ if ( queued != 0 )
flush_requests();
}
@@ -420,9 +398,9 @@ static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
struct request *req;
blkif_response_t *bret;
- BLKIF_RING_IDX i, rp;
+ RING_IDX i, rp;
unsigned long flags;
-
+
spin_lock_irqsave(&blkif_io_lock, flags);
if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) ||
@@ -431,21 +409,21 @@ static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
spin_unlock_irqrestore(&blkif_io_lock, flags);
return IRQ_HANDLED;
}
-
- rp = blk_ring->resp_prod;
+
+ rp = blk_ring.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
- for ( i = resp_cons; i != rp; i++ )
+ for ( i = blk_ring.rsp_cons; i != rp; i++ )
{
unsigned long id;
- bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
- id = bret->id;
- req = (struct request *)rec_ring[id].id;
+ bret = RING_GET_RESPONSE(&blk_ring, i);
+ id = bret->id;
+ req = (struct request *)blk_shadow[id].request;
- blkif_completion( &rec_ring[id] );
+ blkif_completion(&blk_shadow[id]);
- ADD_ID_TO_FREELIST(id); /* overwrites req */
+ ADD_ID_TO_FREELIST(id);
switch ( bret->operation )
{
@@ -454,7 +432,7 @@ static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
DPRINTK("Bad return from blkdev data request: %x\n",
bret->status);
-
+
if ( unlikely(end_that_request_first
(req,
(bret->status == BLKIF_RSP_OKAY),
@@ -471,9 +449,9 @@ static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
BUG();
}
}
-
- resp_cons = i;
+ blk_ring.rsp_cons = i;
+
kick_pending_request_queues();
spin_unlock_irqrestore(&blkif_io_lock, flags);
@@ -522,15 +500,14 @@ static void vbd_update(void)
#endif /* ENABLE_VBD_UPDATE */
/*============================================================================*/
-
static void kick_pending_request_queues(void)
{
/* We kick pending request queues if the ring is reasonably empty. */
if ( (nr_pending != 0) &&
- ((req_prod - resp_cons) < (BLKIF_RING_SIZE >> 1)) )
+ (RING_PENDING_REQUESTS(&blk_ring) < (BLK_RING_SIZE >> 1)) )
{
/* Attempt to drain the queue, but bail if the ring becomes full. */
- while ( (nr_pending != 0) && !BLKIF_RING_FULL )
+ while ( (nr_pending != 0) && !RING_FULL(&blk_ring) )
do_blkif_request(pending_queues[--nr_pending]);
}
}
@@ -783,6 +760,9 @@ static int blkif_queue_request(unsigned long id,
blkif_request_t *req;
struct buffer_head *bh;
unsigned int fsect, lsect;
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ int ref;
+#endif
fsect = (buffer_ma & ~PAGE_MASK) >> 9;
lsect = fsect + nr_sectors - 1;
@@ -824,28 +804,44 @@ static int blkif_queue_request(unsigned long id,
(sg_dev == device) &&
(sg_next_sect == sector_number) )
{
-
- req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod-1)].req;
+ req = RING_GET_REQUEST(&blk_ring,
+ blk_ring.req_prod_pvt - 1);
bh = (struct buffer_head *)id;
- bh->b_reqnext = (struct buffer_head *)rec_ring[req->id].id;
-
+ bh->b_reqnext = (struct buffer_head *)blk_shadow[req->id].request;
+ blk_shadow[req->id].request = (unsigned long)id;
+
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ /* install a grant reference. */
+ ref = gnttab_claim_grant_reference(&gref_head, gref_terminal);
+ ASSERT( ref != -ENOSPC );
+
+ gnttab_grant_foreign_access_ref(
+ ref,
+ rdomid,
+ buffer_ma >> PAGE_SHIFT,
+ ( operation == BLKIF_OP_WRITE ? 1 : 0 ) );
- rec_ring[req->id].id = id;
+ blk_shadow[req->id].frame[req->nr_segments] =
+ buffer_ma >> PAGE_SHIFT;
- req->frame_and_sects[req->nr_segments] =
- buffer_ma | (fsect<<3) | lsect;
+ req->frame_and_sects[req->nr_segments] =
+ (((u32) ref ) << 16) | (fsect << 3) | lsect;
+#else
+ req->frame_and_sects[req->nr_segments] =
+ buffer_ma | (fsect << 3) | lsect;
+#endif
if ( ++req->nr_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST )
sg_next_sect += nr_sectors;
else
DISABLE_SCATTERGATHER();
/* Update the copy of the request in the recovery ring. */
- translate_req_to_pfn(&rec_ring[req->id], req );
+ pickle_request(&blk_shadow[req->id], req );
return 0;
}
- else if ( BLKIF_RING_FULL )
+ else if ( RING_FULL(&blk_ring) )
{
return 1;
}
@@ -862,23 +858,39 @@ static int blkif_queue_request(unsigned long id,
}
/* Fill out a communications ring structure. */
- req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
+ req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
xid = GET_ID_FROM_FREELIST();
- rec_ring[xid].id = id;
+ blk_shadow[xid].request = (unsigned long)id;
req->id = xid;
req->operation = operation;
req->sector_number = (blkif_sector_t)sector_number;
req->device = device;
req->nr_segments = 1;
- req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ /* install a grant reference. */
+ ref = gnttab_claim_grant_reference(&gref_head, gref_terminal);
+ ASSERT( ref != -ENOSPC );
+
+ gnttab_grant_foreign_access_ref(
+ ref,
+ rdomid,
+ buffer_ma >> PAGE_SHIFT,
+ ( operation == BLKIF_OP_WRITE ? 1 : 0 ) );
- req_prod++;
+ blk_shadow[xid].frame[0] = buffer_ma >> PAGE_SHIFT;
+
+ req->frame_and_sects[0] = (((u32) ref)<<16) | (fsect<<3) | lsect;
+#else
+ req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
+#endif
/* Keep a private copy so we can reissue requests when recovering. */
- translate_req_to_pfn(&rec_ring[xid], req );
+ pickle_request(&blk_shadow[xid], req);
+ blk_ring.req_prod_pvt++;
+
return 0;
}
@@ -967,7 +979,7 @@ void do_blkif_request(request_queue_t *rq)
static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
- BLKIF_RING_IDX i, rp;
+ RING_IDX i, rp;
unsigned long flags;
struct buffer_head *bh, *next_bh;
@@ -979,18 +991,19 @@ static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
return;
}
- rp = blk_ring->resp_prod;
+ rp = blk_ring.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
- for ( i = resp_cons; i != rp; i++ )
+ for ( i = blk_ring.rsp_cons; i != rp; i++ )
{
unsigned long id;
- blkif_response_t *bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
-
+ blkif_response_t *bret;
+
+ bret = RING_GET_RESPONSE(&blk_ring, i);
id = bret->id;
- bh = (struct buffer_head *)rec_ring[id].id;
+ bh = (struct buffer_head *)blk_shadow[id].request;
- blkif_completion( &rec_ring[id] );
+ blkif_completion(&blk_shadow[id]);
ADD_ID_TO_FREELIST(id);
@@ -1016,10 +1029,10 @@ static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
default:
BUG();
}
+
}
+ blk_ring.rsp_cons = i;
- resp_cons = i;
-
kick_pending_request_queues();
spin_unlock_irqrestore(&io_request_lock, flags);
@@ -1029,35 +1042,51 @@ static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
/***************************** COMMON CODE *******************************/
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+void blkif_control_probe_send(blkif_request_t *req, blkif_response_t *rsp,
+ unsigned long address)
+{
+ int ref = gnttab_claim_grant_reference(&gref_head, gref_terminal);
+ ASSERT( ref != -ENOSPC );
+
+ gnttab_grant_foreign_access_ref( ref, rdomid, address >> PAGE_SHIFT, 0 );
+
+ req->frame_and_sects[0] = (((u32) ref) << 16) | 7;
+
+ blkif_control_send(req, rsp);
+}
+#endif
void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
{
unsigned long flags, id;
+ blkif_request_t *req_d;
retry:
- while ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
+ while ( RING_FULL(&blk_ring) )
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
spin_lock_irqsave(&blkif_io_lock, flags);
- if ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
+ if ( RING_FULL(&blk_ring) )
{
spin_unlock_irqrestore(&blkif_io_lock, flags);
goto retry;
}
DISABLE_SCATTERGATHER();
- blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req = *req;
+ req_d = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
+ *req_d = *req;
id = GET_ID_FROM_FREELIST();
- blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req.id = id;
- rec_ring[id].id = (unsigned long) req;
+ req_d->id = id;
+ blk_shadow[id].request = (unsigned long)req;
- translate_req_to_pfn( &rec_ring[id], req );
+ pickle_request(&blk_shadow[id], req);
- req_prod++;
+ blk_ring.req_prod_pvt++;
flush_requests();
spin_unlock_irqrestore(&blkif_io_lock, flags);
@@ -1099,7 +1128,7 @@ static void blkif_send_interface_connect(void)
blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
msg->handle = 0;
- msg->shmem_frame = (virt_to_machine(blk_ring) >> PAGE_SHIFT);
+ msg->shmem_frame = (virt_to_machine(blk_ring.sring) >> PAGE_SHIFT);
ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
}
@@ -1113,10 +1142,10 @@ static void blkif_free(void)
spin_unlock_irq(&blkif_io_lock);
/* Free resources associated with old device channel. */
- if ( blk_ring != NULL )
+ if ( blk_ring.sring != NULL )
{
- free_page((unsigned long)blk_ring);
- blk_ring = NULL;
+ free_page((unsigned long)blk_ring.sring);
+ blk_ring.sring = NULL;
}
free_irq(blkif_irq, NULL);
blkif_irq = 0;
@@ -1132,10 +1161,14 @@ static void blkif_close(void)
/* Move from CLOSED to DISCONNECTED state. */
static void blkif_disconnect(void)
{
- if ( blk_ring != NULL )
- free_page((unsigned long)blk_ring);
- blk_ring = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
- blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
+ blkif_sring_t *sring;
+
+ if ( blk_ring.sring != NULL )
+ free_page((unsigned long)blk_ring.sring);
+
+ sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&blk_ring, sring, PAGE_SIZE);
blkif_state = BLKIF_STATE_DISCONNECTED;
blkif_send_interface_connect();
}
@@ -1149,48 +1182,70 @@ static void blkif_reset(void)
static void blkif_recover(void)
{
int i;
+ blkif_request_t *req;
+ struct blk_shadow *copy;
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ int j;
+#endif
- /* Hmm, requests might be re-ordered when we re-issue them.
- * This will need to be fixed once we have barriers */
+ /* Stage 1: Make a safe copy of the shadow state. */
+ copy = (struct blk_shadow *)kmalloc(sizeof(blk_shadow), GFP_KERNEL);
+ BUG_ON(copy == NULL);
+ memcpy(copy, blk_shadow, sizeof(blk_shadow));
- /* Stage 1 : Find active and move to safety. */
- for ( i = 0; i < BLKIF_RING_SIZE; i++ )
+ /* Stage 2: Set up free list. */
+ memset(&blk_shadow, 0, sizeof(blk_shadow));
+ for ( i = 0; i < BLK_RING_SIZE; i++ )
+ blk_shadow[i].req.id = i+1;
+ blk_shadow_free = blk_ring.req_prod_pvt;
+ blk_shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+
+ /* Stage 3: Find pending requests and requeue them. */
+ for ( i = 0; i < BLK_RING_SIZE; i++ )
{
- if ( rec_ring[i].id >= PAGE_OFFSET )
+ /* Not in use? */
+ if ( copy[i].request == 0 )
+ continue;
+
+ /* Grab a request slot and unpickle shadow state into it. */
+ req = RING_GET_REQUEST(
+ &blk_ring, blk_ring.req_prod_pvt);
+ unpickle_request(req, &copy[i]);
+
+ /* We get a new request id, and must reset the shadow state. */
+ req->id = GET_ID_FROM_FREELIST();
+ memcpy(&blk_shadow[req->id], &copy[i], sizeof(copy[i]));
+
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ /* Rewrite any grant references invalidated by suspend/resume. */
+ for ( j = 0; j < req->nr_segments; j++ )
{
- translate_req_to_mfn(
- &blk_ring->ring[req_prod].req, &rec_ring[i]);
- req_prod++;
+ if ( req->frame_and_sects[j] & GRANTREF_INVALID )
+ gnttab_grant_foreign_access_ref(
+ blkif_gref_from_fas(req->frame_and_sects[j]),
+ rdomid,
+ blk_shadow[req->id].frame[j],
+ rq_data_dir((struct request *)
+ blk_shadow[req->id].request));
+ req->frame_and_sects[j] &= ~GRANTREF_INVALID;
}
- }
+ blk_shadow[req->id].req = *req;
+#endif
- /* Stage 2 : Set up shadow list. */
- for ( i = 0; i < req_prod; i++ )
- {
- rec_ring[i].id = blk_ring->ring[i].req.id;
- blk_ring->ring[i].req.id = i;
- translate_req_to_pfn(&rec_ring[i], &blk_ring->ring[i].req);
+ blk_ring.req_prod_pvt++;
}
- /* Stage 3 : Set up free list. */
- for ( ; i < BLKIF_RING_SIZE; i++ )
- rec_ring[i].id = i+1;
- rec_ring_free = req_prod;
- rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
-
- /* blk_ring->req_prod will be set when we flush_requests().*/
- wmb();
+ kfree(copy);
- /* Switch off recovery mode, using a memory barrier to ensure that
- * it's seen before we flush requests - we don't want to miss any
- * interrupts. */
recovery = 0;
+
+ /* blk_ring->req_prod will be set when we flush_requests().*/
wmb();
/* Kicks things back into life. */
flush_requests();
- /* Now safe to left other peope use interface. */
+ /* Now safe to let other people use the interface. */
blkif_state = BLKIF_STATE_CONNECTED;
}
@@ -1200,6 +1255,9 @@ static void blkif_connect(blkif_fe_interface_status_t *status)
blkif_evtchn = status->evtchn;
blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ rdomid = status->domid;
+#endif
err = request_irq(blkif_irq, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL);
if ( err )
@@ -1238,7 +1296,8 @@ static void blkif_status(blkif_fe_interface_status_t *status)
{
if ( status->handle != blkif_handle )
{
- WPRINTK(" Invalid blkif: handle=%u", status->handle);
+ WPRINTK(" Invalid blkif: handle=%u\n", status->handle);
+ unexpected(status);
return;
}
@@ -1315,21 +1374,15 @@ static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
switch ( msg->subtype )
{
case CMSG_BLKIF_FE_INTERFACE_STATUS:
- if ( msg->length != sizeof(blkif_fe_interface_status_t) )
- goto parse_error;
blkif_status((blkif_fe_interface_status_t *)
&msg->msg[0]);
- break;
+ break;
default:
- goto parse_error;
+ msg->length = 0;
+ break;
}
ctrl_if_send_response(msg);
- return;
-
- parse_error:
- msg->length = 0;
- ctrl_if_send_response(msg);
}
int wait_for_blkif(void)
@@ -1360,17 +1413,25 @@ int wait_for_blkif(void)
int __init xlblk_init(void)
{
int i;
-
+
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ if ( 0 > gnttab_alloc_grant_references( MAXIMUM_OUTSTANDING_BLOCK_REQS,
+ &gref_head, &gref_terminal ))
+ return 1;
+ printk(KERN_ALERT "Blkif frontend is using grant tables.\n");
+#endif
+
if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
(xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
return 0;
IPRINTK("Initialising virtual block device driver\n");
- rec_ring_free = 0;
- for ( i = 0; i < BLKIF_RING_SIZE; i++ )
- rec_ring[i].id = i+1;
- rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
+ blk_shadow_free = 0;
+ memset(blk_shadow, 0, sizeof(blk_shadow));
+ for ( i = 0; i < BLK_RING_SIZE; i++ )
+ blk_shadow[i].req.id = i+1;
+ blk_shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
(void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
CALLBACK_IN_BLOCKING_CONTEXT);
@@ -1386,25 +1447,32 @@ void blkdev_suspend(void)
void blkdev_resume(void)
{
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ int i, j;
+ for ( i = 0; i < BLK_RING_SIZE; i++ )
+ for ( j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++ )
+ blk_shadow[i].req.frame_and_sects[j] |= GRANTREF_INVALID;
+#endif
send_driver_status(1);
}
-/* XXXXX THIS IS A TEMPORARY FUNCTION UNTIL WE GET GRANT TABLES */
-
-void blkif_completion(blkif_request_t *req)
+static void blkif_completion(struct blk_shadow *s)
{
int i;
-
- switch ( req->operation )
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ for ( i = 0; i < s->req.nr_segments; i++ )
+ gnttab_release_grant_reference(
+ &gref_head, blkif_gref_from_fas(s->req.frame_and_sects[i]));
+#else
+ /* This is a hack to get the dirty logging bits set */
+ if ( s->req.operation == BLKIF_OP_READ )
{
- case BLKIF_OP_READ:
- for ( i = 0; i < req->nr_segments; i++ )
+ for ( i = 0; i < s->req.nr_segments; i++ )
{
- unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
+ unsigned long pfn = s->req.frame_and_sects[i] >> PAGE_SHIFT;
unsigned long mfn = phys_to_machine_mapping[pfn];
xen_machphys_update(mfn, pfn);
}
- break;
}
-
+#endif
}
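
The rec_ring recovery array becomes blk_shadow[], and the free list is threaded through the otherwise-unused req.id fields, exactly as GET_ID_FROM_FREELIST()/ADD_ID_TO_FREELIST() show above. A self-contained sketch of that idiom (SHADOW_SIZE stands in for BLK_RING_SIZE; the 0x0fffffff terminator matches the patch):

    #define SHADOW_SIZE 64                    /* stands in for BLK_RING_SIZE */

    struct shadow { unsigned long id; unsigned long request; };
    static struct shadow shadow[SHADOW_SIZE];
    static unsigned long shadow_free;

    static void freelist_init(void)
    {
        int i;
        for ( i = 0; i < SHADOW_SIZE; i++ )
            shadow[i].id = i + 1;             /* each slot points at the next */
        shadow[SHADOW_SIZE-1].id = 0x0fffffff; /* out-of-range terminator */
        shadow_free = 0;
    }

    static unsigned long get_id(void)
    {
        unsigned long free = shadow_free;
        shadow_free = shadow[free].id;        /* pop the head of the list */
        return free;
    }

    static void put_id(unsigned long id)
    {
        shadow[id].id = shadow_free;          /* push the slot back on */
        shadow_free = id;
    }
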
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/block.h b/linux-2.6.11-xen-sparse/drivers/xen/blkfront/block.h
index 7f46506285..459f9d9545 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/block.h
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkfront/block.h
@@ -46,6 +46,7 @@
#include <linux/devfs_fs_kernel.h>
#include <asm-xen/xen-public/xen.h>
#include <asm-xen/xen-public/io/blkif.h>
+#include <asm-xen/xen-public/io/ring.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
@@ -105,6 +106,10 @@ extern int blkif_ioctl(struct inode *inode, struct file *filep,
extern int blkif_check(dev_t dev);
extern int blkif_revalidate(dev_t dev);
extern void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp);
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+extern void blkif_control_probe_send(
+ blkif_request_t *req, blkif_response_t *rsp, unsigned long address);
+#endif
extern void do_blkif_request (request_queue_t *rq);
extern void xlvbd_update_vbds(void);
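
Under CONFIG_XEN_BLKDEV_GRANT each frame_and_sects entry packs a grant reference into bits 16 and up, with the first and last sector numbers (3 bits each, for 512-byte sectors in a 4KB page) below it; blkif_gref_from_fas() is the decoder the patch relies on. A sketch of the encode/decode pair; the decode helpers here are assumptions inferred from the encoding:

    /* Encode, as in blkif_queue_request() above. */
    static inline unsigned long fas_encode(u32 ref, unsigned int fsect,
                                           unsigned int lsect)
    {
        return (((unsigned long)ref) << 16) | (fsect << 3) | lsect;
    }

    /* Assumed inverses (not part of the patch). */
    static inline u32 fas_gref(unsigned long fas)  { return fas >> 16; }
    static inline unsigned int fas_fsect(unsigned long fas)
    { return (fas >> 3) & 7; }
    static inline unsigned int fas_lsect(unsigned long fas)
    { return fas & 7; }
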
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blktap/Makefile b/linux-2.6.11-xen-sparse/drivers/xen/blktap/Makefile
new file mode 100644
index 0000000000..80b7ca0627
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blktap/Makefile
@@ -0,0 +1,3 @@
+
+obj-y := blktap_userdev.o blktap_datapath.o blktap_controlmsg.o blktap.o
+
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap.c b/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap.c
new file mode 100644
index 0000000000..a9a00677bc
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap.c
@@ -0,0 +1,87 @@
+/******************************************************************************
+ * blktap.c
+ *
+ * XenLinux virtual block-device tap.
+ *
+ * Copyright (c) 2004, Andrew Warfield
+ *
+ * Based on the original split block driver:
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ *
+ * Note that unlike the split block driver code, this driver has been developed
+ * strictly for Linux 2.6
+ */
+
+#include "blktap.h"
+
+int __init xlblktap_init(void)
+{
+ ctrl_msg_t cmsg;
+ blkif_fe_driver_status_t fe_st;
+ blkif_be_driver_status_t be_st;
+
+ printk(KERN_INFO "Initialising Xen block tap device\n");
+
+ DPRINTK(" tap - Backend connection init:\n");
+
+
+ (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+
+ /* Send a driver-UP notification to the domain controller. */
+ cmsg.type = CMSG_BLKIF_FE;
+ cmsg.subtype = CMSG_BLKIF_FE_DRIVER_STATUS;
+ cmsg.length = sizeof(blkif_fe_driver_status_t);
+ fe_st.status = BLKIF_DRIVER_STATUS_UP;
+ memcpy(cmsg.msg, &fe_st, sizeof(fe_st));
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+
+ DPRINTK(" tap - Frontend connection init:\n");
+
+ active_reqs_init();
+ blkif_interface_init();
+ blkdev_schedule_init();
+
+ (void)ctrl_if_register_receiver(CMSG_BLKIF_BE, blkif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+
+ /* Send a driver-UP notification to the domain controller. */
+ cmsg.type = CMSG_BLKIF_BE;
+ cmsg.subtype = CMSG_BLKIF_BE_DRIVER_STATUS;
+ cmsg.length = sizeof(blkif_be_driver_status_t);
+ be_st.status = BLKIF_DRIVER_STATUS_UP;
+ memcpy(cmsg.msg, &be_st, sizeof(be_st));
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+
+ DPRINTK(" tap - Userland channel init:\n");
+
+ blktap_init();
+
+ DPRINTK("Blkif tap device initialized.\n");
+
+ return 0;
+}
+
+#if 0 /* tap doesn't handle suspend/resume */
+void blkdev_suspend(void)
+{
+}
+
+void blkdev_resume(void)
+{
+ ctrl_msg_t cmsg;
+ blkif_fe_driver_status_t st;
+
+ /* Send a driver-UP notification to the domain controller. */
+ cmsg.type = CMSG_BLKIF_FE;
+ cmsg.subtype = CMSG_BLKIF_FE_DRIVER_STATUS;
+ cmsg.length = sizeof(blkif_fe_driver_status_t);
+ st.status = BLKIF_DRIVER_STATUS_UP;
+ memcpy(cmsg.msg, &st, sizeof(st));
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
+#endif
+
+__initcall(xlblktap_init);
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap.h b/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap.h
new file mode 100644
index 0000000000..dac4b0a676
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap.h
@@ -0,0 +1,253 @@
+/*
+ * blktap.h
+ *
+ * Interfaces for the Xen block tap driver.
+ *
+ * (c) 2004, Andrew Warfield, University of Cambridge
+ *
+ */
+
+#ifndef __BLKTAP_H__
+#define __BLKTAP_H__
+
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm-xen/ctrl_if.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/pgalloc.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/xen-public/io/blkif.h>
+#include <asm-xen/xen-public/io/ring.h>
+
+/* Used to signal to the backend that this is a tap domain. */
+#define BLKTAP_COOKIE 0xbeadfeed
+
+/* -------[ debug / pretty printing ]--------------------------------- */
+
+#define PRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+#if 0
+#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+#if 1
+#define ASSERT(_p) \
+ if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+ __LINE__, __FILE__); *(int*)0=0; }
+#else
+#define ASSERT(_p) ((void)0)
+#endif
+
+#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
+
+
+/* -------[ state descriptors ]--------------------------------------- */
+
+#define BLKIF_STATE_CLOSED 0
+#define BLKIF_STATE_DISCONNECTED 1
+#define BLKIF_STATE_CONNECTED 2
+
+/* -------[ connection tracking ]------------------------------------- */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#endif
+
+extern spinlock_t blkif_io_lock;
+
+typedef struct blkif_st {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+ /* Physical parameters of the comms window. */
+ unsigned long shmem_frame;
+ unsigned int evtchn;
+ int irq;
+ /* Comms information. */
+ blkif_back_ring_t blk_ring;
+
+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+ /*
+ * DISCONNECT response is deferred until pending requests are ack'ed.
+ * We therefore need to store the id from the original request.
+ */
+ u8 disconnect_rspid;
+ struct blkif_st *hash_next;
+ struct list_head blkdev_list;
+ spinlock_t blk_ring_lock;
+ atomic_t refcnt;
+ struct work_struct work;
+} blkif_t;
+
+blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle);
+void blkif_disconnect_complete(blkif_t *blkif);
+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
+#define blkif_put(_b) \
+ do { \
+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+ blkif_disconnect_complete(_b); \
+ } while (0)
+
+
+/* -------[ active request tracking ]--------------------------------- */
+
+typedef struct {
+ blkif_t *blkif;
+ unsigned long id;
+ int nr_pages;
+ unsigned long mach_fas[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ unsigned long virt_fas[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ int next_free;
+} active_req_t;
+
+typedef unsigned int ACTIVE_RING_IDX;
+
+active_req_t *lookup_active_req(ACTIVE_RING_IDX idx);
+
+extern inline unsigned int ID_TO_IDX(unsigned long id)
+{
+ return ( id & 0x0000ffff );
+}
+
+extern inline domid_t ID_TO_DOM(unsigned long id)
+{
+ return (id >> 16);
+}
+
+void active_reqs_init(void);
+
+/* -------[ interposition -> character device interface ]------------- */
+
+/* /dev/xen/blktap resides at device number major=10, minor=202 */
+#define BLKTAP_MINOR 202
+
+/* size of the extra VMA area to map in attached pages. */
+#define BLKTAP_VMA_PAGES BLKIF_RING_SIZE
+
+/* blktap IOCTLs: */
+#define BLKTAP_IOCTL_KICK_FE 1
+#define BLKTAP_IOCTL_KICK_BE 2
+#define BLKTAP_IOCTL_SETMODE 3
+#define BLKTAP_IOCTL_PRINT_IDXS 100
+
+/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE) */
+#define BLKTAP_MODE_PASSTHROUGH 0x00000000 /* default */
+#define BLKTAP_MODE_INTERCEPT_FE 0x00000001
+#define BLKTAP_MODE_INTERCEPT_BE 0x00000002
+#define BLKTAP_MODE_COPY_FE 0x00000004
+#define BLKTAP_MODE_COPY_BE 0x00000008
+#define BLKTAP_MODE_COPY_FE_PAGES 0x00000010
+#define BLKTAP_MODE_COPY_BE_PAGES 0x00000020
+
+#define BLKTAP_MODE_INTERPOSE \
+ (BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
+
+#define BLKTAP_MODE_COPY_BOTH \
+ (BLKTAP_MODE_COPY_FE | BLKTAP_MODE_COPY_BE)
+
+#define BLKTAP_MODE_COPY_BOTH_PAGES \
+ (BLKTAP_MODE_COPY_FE_PAGES | BLKTAP_MODE_COPY_BE_PAGES)
+
+static inline int BLKTAP_MODE_VALID(unsigned long arg)
+{
+ return (
+ ( arg == BLKTAP_MODE_PASSTHROUGH ) ||
+ ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
+ ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
+ ( arg == BLKTAP_MODE_INTERPOSE ) ||
+ ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
+ ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
+ ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
+ );
+}
+
+
+
+/* -------[ Mappings to User VMA ]------------------------------------ */
+#define MAX_PENDING_REQS 64
+#define BATCH_PER_DOMAIN 16
+extern struct vm_area_struct *blktap_vma;
+
+/* The following are from blkback.c and should probably be put in a
+ * header and included from there.
+ * The mmap area described here is where attached data pages will be mapped.
+ */
+
+extern unsigned long mmap_vstart;
+#define MMAP_PAGES_PER_REQUEST \
+ (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
+#define MMAP_PAGES \
+ (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
+#define MMAP_VADDR(_req,_seg) \
+ (mmap_vstart + \
+ ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
+ ((_seg) * PAGE_SIZE))
+
+/* immediately before the mmap area, we have a bunch of pages reserved
+ * for shared memory rings.
+ */
+
+#define RING_PAGES 3 /* Ctrl, Front, and Back */
+extern unsigned long rings_vstart;
+
+
+/* -------[ Here be globals ]----------------------------------------- */
+extern unsigned long blktap_mode;
+
+/* Connection to a single backend domain. */
+extern blkif_front_ring_t blktap_be_ring;
+extern unsigned int blktap_be_evtchn;
+extern unsigned int blktap_be_state;
+
+/* User ring status. */
+extern unsigned long blktap_ring_ok;
+
+/* -------[ ...and function prototypes. ]----------------------------- */
+
+/* init function for character device interface. */
+int blktap_init(void);
+
+/* init function for the blkif cache. */
+void __init blkif_interface_init(void);
+void __init blkdev_schedule_init(void);
+void blkif_deschedule(blkif_t *blkif);
+
+/* interfaces to the char driver, passing messages to and from apps. */
+void blktap_kick_user(void);
+
+/* user ring access functions: */
+int blktap_write_fe_ring(blkif_request_t *req);
+int blktap_write_be_ring(blkif_response_t *rsp);
+int blktap_write_ctrl_ring(ctrl_msg_t *msg);
+
+/* fe/be ring access functions: */
+int write_resp_to_fe_ring(blkif_t *blkif, blkif_response_t *rsp);
+int write_req_to_be_ring(blkif_request_t *req);
+
+/* event notification functions */
+void kick_fe_domain(blkif_t *blkif);
+void kick_be_domain(void);
+
+/* Interrupt handlers. */
+irqreturn_t blkif_ptbe_int(int irq, void *dev_id,
+ struct pt_regs *ptregs);
+irqreturn_t blkif_ptfe_int(int irq, void *dev_id, struct pt_regs *regs);
+
+/* Control message receiver. */
+extern void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id);
+
+/* debug */
+void print_fe_ring_idxs(void);
+void print_be_ring_idxs(void);
+
+#define __BLKINT_H__
+#endif
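
Request ids crossing the tap carry the frontend domain in the upper 16 bits and an active-request index in the lower 16, which the ID_TO_IDX()/ID_TO_DOM() helpers above decode. A sketch of the matching encoder (MAKE_ID is illustrative; this header defines only the decoders):

    static inline unsigned long MAKE_ID(domid_t dom, unsigned int idx)
    {
        return (((unsigned long)dom) << 16) | (idx & 0x0000ffff);
    }
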
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_controlmsg.c b/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_controlmsg.c
new file mode 100644
index 0000000000..060c6a2dd2
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_controlmsg.c
@@ -0,0 +1,539 @@
+/******************************************************************************
+ * blktap_controlmsg.c
+ *
+ * XenLinux virtual block-device tap.
+ * Control interfaces to the frontend and backend drivers.
+ *
+ * Copyright (c) 2004, Andrew Warfield
+ *
+ */
+
+#include "blktap.h"
+
+static char *blkif_state_name[] = {
+ [BLKIF_STATE_CLOSED] = "closed",
+ [BLKIF_STATE_DISCONNECTED] = "disconnected",
+ [BLKIF_STATE_CONNECTED] = "connected",
+};
+
+static char *blkif_status_name[] = {
+ [BLKIF_INTERFACE_STATUS_CLOSED] = "closed",
+ [BLKIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
+ [BLKIF_INTERFACE_STATUS_CONNECTED] = "connected",
+ [BLKIF_INTERFACE_STATUS_CHANGED] = "changed",
+};
+
+static unsigned blktap_be_irq;
+unsigned int blktap_be_state = BLKIF_STATE_CLOSED;
+unsigned int blktap_be_evtchn;
+
+/*-----[ Control Messages to/from Frontend VMs ]--------------------------*/
+
+#define BLKIF_HASHSZ 1024
+#define BLKIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(BLKIF_HASHSZ-1))
+
+static kmem_cache_t *blkif_cachep;
+static blkif_t *blkif_hash[BLKIF_HASHSZ];
+
+blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle)
+{
+ blkif_t *blkif = blkif_hash[BLKIF_HASH(domid, handle)];
+ while ( (blkif != NULL) &&
+ ((blkif->domid != domid) || (blkif->handle != handle)) )
+ blkif = blkif->hash_next;
+ return blkif;
+}
+
+static void __blkif_disconnect_complete(void *arg)
+{
+ blkif_t *blkif = (blkif_t *)arg;
+ ctrl_msg_t cmsg;
+ blkif_be_disconnect_t disc;
+
+ /*
+ * These can't be done in blkif_disconnect() because at that point there
+ * may be outstanding requests at the disk whose asynchronous responses
+ * must still be notified to the remote driver.
+ */
+ unbind_evtchn_from_irq(blkif->evtchn);
+ vfree(blkif->blk_ring.sring);
+
+ /* Construct the deferred response message. */
+ cmsg.type = CMSG_BLKIF_BE;
+ cmsg.subtype = CMSG_BLKIF_BE_DISCONNECT;
+ cmsg.id = blkif->disconnect_rspid;
+ cmsg.length = sizeof(blkif_be_disconnect_t);
+ disc.domid = blkif->domid;
+ disc.blkif_handle = blkif->handle;
+ disc.status = BLKIF_BE_STATUS_OKAY;
+ memcpy(cmsg.msg, &disc, sizeof(disc));
+
+ /*
+ * Make sure message is constructed /before/ status change, because
+ * after the status change the 'blkif' structure could be deallocated at
+ * any time. Also make sure we send the response /after/ status change,
+ * as otherwise a subsequent CONNECT request could spuriously fail if
+ * another CPU doesn't see the status change yet.
+ */
+ mb();
+ if ( blkif->status != DISCONNECTING )
+ BUG();
+ blkif->status = DISCONNECTED;
+ mb();
+
+ /* Send the successful response. */
+ ctrl_if_send_response(&cmsg);
+}
+
+void blkif_disconnect_complete(blkif_t *blkif)
+{
+ INIT_WORK(&blkif->work, __blkif_disconnect_complete, (void *)blkif);
+ schedule_work(&blkif->work);
+}
+
+void blkif_ptfe_create(blkif_be_create_t *create)
+{
+ blkif_t *blkif, **pblkif;
+ domid_t domid = create->domid;
+ unsigned int handle = create->blkif_handle;
+
+
+ /* May want to store info on the connecting domain here. */
+
+ DPRINTK("PT got BE_CREATE\n");
+
+ if ( (blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL)) == NULL )
+ {
+ WPRINTK("Could not create blkif: out of memory\n");
+ create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ /* blkif struct init code from blkback.c */
+ memset(blkif, 0, sizeof(*blkif));
+ blkif->domid = domid;
+ blkif->handle = handle;
+ blkif->status = DISCONNECTED;
+ spin_lock_init(&blkif->blk_ring_lock);
+ atomic_set(&blkif->refcnt, 0);
+
+ pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
+ while ( *pblkif != NULL )
+ {
+ if ( ((*pblkif)->domid == domid) && ((*pblkif)->handle == handle) )
+ {
+ WPRINTK("Could not create blkif: already exists\n");
+ create->status = BLKIF_BE_STATUS_INTERFACE_EXISTS;
+ kmem_cache_free(blkif_cachep, blkif);
+ return;
+ }
+ pblkif = &(*pblkif)->hash_next;
+ }
+
+ blkif->hash_next = *pblkif;
+ *pblkif = blkif;
+
+ create->status = BLKIF_BE_STATUS_OKAY;
+}
+
+
+void blkif_ptfe_destroy(blkif_be_destroy_t *destroy)
+{
+ /* Clear anything that we initialized above. */
+
+ domid_t domid = destroy->domid;
+ unsigned int handle = destroy->blkif_handle;
+ blkif_t **pblkif, *blkif;
+
+ DPRINTK("PT got BE_DESTROY\n");
+
+ pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
+ while ( (blkif = *pblkif) != NULL )
+ {
+ if ( (blkif->domid == domid) && (blkif->handle == handle) )
+ {
+ if ( blkif->status != DISCONNECTED )
+ goto still_connected;
+ goto destroy;
+ }
+ pblkif = &blkif->hash_next;
+ }
+
+ destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+
+ still_connected:
+ destroy->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+
+ destroy:
+ *pblkif = blkif->hash_next;
+ kmem_cache_free(blkif_cachep, blkif);
+ destroy->status = BLKIF_BE_STATUS_OKAY;
+}
+
+void blkif_ptfe_connect(blkif_be_connect_t *connect)
+{
+ domid_t domid = connect->domid;
+ unsigned int handle = connect->blkif_handle;
+ unsigned int evtchn = connect->evtchn;
+ unsigned long shmem_frame = connect->shmem_frame;
+ struct vm_struct *vma;
+ pgprot_t prot;
+ int error;
+ blkif_t *blkif;
+ blkif_sring_t *sring;
+
+ DPRINTK("PT got BE_CONNECT\n");
+
+ blkif = blkif_find_by_handle(domid, handle);
+ if ( unlikely(blkif == NULL) )
+ {
+ WPRINTK("blkif_connect attempted for non-existent blkif (%u,%u)\n",
+ connect->domid, connect->blkif_handle);
+ connect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
+ {
+ connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+ error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
+ shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ if ( error != 0 )
+ {
+ WPRINTK("BE_CONNECT: error! (%d)\n", error);
+ if ( error == -ENOMEM )
+ connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ else if ( error == -EFAULT ) {
+ connect->status = BLKIF_BE_STATUS_MAPPING_ERROR;
+ WPRINTK("BE_CONNECT: MAPPING error!\n");
+ }
+ else
+ connect->status = BLKIF_BE_STATUS_ERROR;
+ vfree(vma->addr);
+ return;
+ }
+
+ if ( blkif->status != DISCONNECTED )
+ {
+ connect->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
+ vfree(vma->addr);
+ return;
+ }
+
+ sring = (blkif_sring_t *)vma->addr;
+ SHARED_RING_INIT(sring);
+ BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+
+ blkif->evtchn = evtchn;
+ blkif->irq = bind_evtchn_to_irq(evtchn);
+ blkif->shmem_frame = shmem_frame;
+ blkif->status = CONNECTED;
+ blkif_get(blkif);
+
+ request_irq(blkif->irq, blkif_ptfe_int, 0, "blkif-pt-backend", blkif);
+
+ connect->status = BLKIF_BE_STATUS_OKAY;
+}
+
+int blkif_ptfe_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id)
+{
+ domid_t domid = disconnect->domid;
+ unsigned int handle = disconnect->blkif_handle;
+ blkif_t *blkif;
+
+ DPRINTK("PT got BE_DISCONNECT\n");
+
+ blkif = blkif_find_by_handle(domid, handle);
+ if ( unlikely(blkif == NULL) )
+ {
+ WPRINTK("blkif_disconnect attempted for non-existent blkif"
+ " (%u,%u)\n", disconnect->domid, disconnect->blkif_handle);
+ disconnect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return 1; /* Caller will send response error message. */
+ }
+
+ if ( blkif->status == CONNECTED )
+ {
+ blkif->status = DISCONNECTING;
+ blkif->disconnect_rspid = rsp_id;
+ wmb(); /* Let other CPUs see the status change. */
+ free_irq(blkif->irq, blkif);
+ blkif_deschedule(blkif);
+ blkif_put(blkif);
+ return 0; /* Caller should not send response message. */
+ }
+
+ disconnect->status = BLKIF_BE_STATUS_OKAY;
+ return 1;
+}
+
+/*-----[ Control Messages to/from Backend VM ]----------------------------*/
+
+/* Tell the controller to bring up the interface. */
+static void blkif_ptbe_send_interface_connect(void)
+{
+ ctrl_msg_t cmsg = {
+ .type = CMSG_BLKIF_FE,
+ .subtype = CMSG_BLKIF_FE_INTERFACE_CONNECT,
+ .length = sizeof(blkif_fe_interface_connect_t),
+ };
+ blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
+ msg->handle = 0;
+ msg->shmem_frame = virt_to_machine(blktap_be_ring.sring) >> PAGE_SHIFT;
+
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
+
+static void blkif_ptbe_close(void)
+{
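+    /* Stub: nothing to tear down yet on a CLOSED status message. */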
+}
+
+/* Move from CLOSED to DISCONNECTED state. */
+static void blkif_ptbe_disconnect(void)
+{
+ blkif_sring_t *sring;
+
+ sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&blktap_be_ring, sring, PAGE_SIZE);
+ blktap_be_state = BLKIF_STATE_DISCONNECTED;
+ DPRINTK("Blkif-Passthrough-BE is now DISCONNECTED.\n");
+ blkif_ptbe_send_interface_connect();
+}
+
+static void blkif_ptbe_connect(blkif_fe_interface_status_t *status)
+{
+ int err = 0;
+
+ blktap_be_evtchn = status->evtchn;
+ blktap_be_irq = bind_evtchn_to_irq(blktap_be_evtchn);
+
+ err = request_irq(blktap_be_irq, blkif_ptbe_int,
+ SA_SAMPLE_RANDOM, "blkif", NULL);
+ if ( err ) {
+ WPRINTK("blkfront request_irq failed (%d)\n", err);
+ return;
+ } else {
+        /* transition to connected in case we need to do a
+           partition probe on a whole disk */
+ blktap_be_state = BLKIF_STATE_CONNECTED;
+ }
+}
+
+static void unexpected(blkif_fe_interface_status_t *status)
+{
+ WPRINTK(" TAP: Unexpected blkif status %s in state %s\n",
+ blkif_status_name[status->status],
+ blkif_state_name[blktap_be_state]);
+}
+
+static void blkif_ptbe_status(
+ blkif_fe_interface_status_t *status)
+{
+ if ( status->handle != 0 )
+ {
+ DPRINTK("Status change on unsupported blkif %d\n",
+ status->handle);
+ return;
+ }
+
+ DPRINTK("ptbe_status: got %s\n", blkif_status_name[status->status]);
+
+ switch ( status->status )
+ {
+ case BLKIF_INTERFACE_STATUS_CLOSED:
+ switch ( blktap_be_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ unexpected(status);
+ break;
+ case BLKIF_STATE_DISCONNECTED:
+ case BLKIF_STATE_CONNECTED:
+ unexpected(status);
+ blkif_ptbe_close();
+ break;
+ }
+ break;
+
+ case BLKIF_INTERFACE_STATUS_DISCONNECTED:
+ switch ( blktap_be_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ blkif_ptbe_disconnect();
+ break;
+ case BLKIF_STATE_DISCONNECTED:
+ case BLKIF_STATE_CONNECTED:
+ printk(KERN_ALERT "*** add recovery code to the tap driver. ***\n");
+ unexpected(status);
+ break;
+ }
+ break;
+
+ case BLKIF_INTERFACE_STATUS_CONNECTED:
+ switch ( blktap_be_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ unexpected(status);
+ blkif_ptbe_disconnect();
+ blkif_ptbe_connect(status);
+ break;
+ case BLKIF_STATE_DISCONNECTED:
+ blkif_ptbe_connect(status);
+ break;
+ case BLKIF_STATE_CONNECTED:
+ unexpected(status);
+ blkif_ptbe_connect(status);
+ break;
+ }
+ break;
+
+ case BLKIF_INTERFACE_STATUS_CHANGED:
+ switch ( blktap_be_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ case BLKIF_STATE_DISCONNECTED:
+ unexpected(status);
+ break;
+ case BLKIF_STATE_CONNECTED:
+ /* vbd_update(); */
+ /* tap doesn't really get state changes... */
+ unexpected(status);
+ break;
+ }
+ break;
+
+ default:
+ DPRINTK("Status change to unknown value %d\n", status->status);
+ break;
+ }
+}
+
+/*-----[ All control messages enter here: ]-------------------------------*/
+
+void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ switch ( msg->type )
+ {
+ case CMSG_BLKIF_FE:
+
+ switch ( msg->subtype )
+ {
+ case CMSG_BLKIF_FE_INTERFACE_STATUS:
+ blkif_ptbe_status((blkif_fe_interface_status_t *) &msg->msg[0]);
+ break;
+
+ default:
+ goto parse_error;
+ }
+
+ break;
+
+ case CMSG_BLKIF_BE:
+
+ /* send a copy of the message to user if wanted */
+
+ if ( (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) ||
+ (blktap_mode & BLKTAP_MODE_COPY_FE) ) {
+
+ blktap_write_ctrl_ring(msg);
+ }
+
+ switch ( msg->subtype )
+ {
+ case CMSG_BLKIF_BE_CREATE:
+ blkif_ptfe_create((blkif_be_create_t *)&msg->msg[0]);
+ break;
+ case CMSG_BLKIF_BE_DESTROY:
+ blkif_ptfe_destroy((blkif_be_destroy_t *)&msg->msg[0]);
+ break;
+ case CMSG_BLKIF_BE_CONNECT:
+ blkif_ptfe_connect((blkif_be_connect_t *)&msg->msg[0]);
+ break;
+ case CMSG_BLKIF_BE_DISCONNECT:
+ if ( !blkif_ptfe_disconnect((blkif_be_disconnect_t *)&msg->msg[0],
+ msg->id) )
+ return;
+ break;
+
+ /* We just ignore anything to do with vbds for now. */
+
+ case CMSG_BLKIF_BE_VBD_CREATE:
+ DPRINTK("PT got VBD_CREATE\n");
+ ((blkif_be_vbd_create_t *)&msg->msg[0])->status
+ = BLKIF_BE_STATUS_OKAY;
+ break;
+ case CMSG_BLKIF_BE_VBD_DESTROY:
+ DPRINTK("PT got VBD_DESTROY\n");
+ ((blkif_be_vbd_destroy_t *)&msg->msg[0])->status
+ = BLKIF_BE_STATUS_OKAY;
+ break;
+ default:
+ goto parse_error;
+ }
+
+ break;
+ }
+
+ ctrl_if_send_response(msg);
+ return;
+
+ parse_error:
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+}
+
+/*-----[ Initialization ]-------------------------------------------------*/
+
+void __init blkif_interface_init(void)
+{
+ blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
+ 0, 0, NULL, NULL);
+ memset(blkif_hash, 0, sizeof(blkif_hash));
+
+ blktap_be_ring.sring = NULL;
+}
+
+/* Debug: print the current ring indices. */
+
+void print_fe_ring_idxs(void)
+{
+ int i;
+ blkif_t *blkif;
+
+ WPRINTK("FE Rings: \n---------\n");
+ for ( i = 0; i < BLKIF_HASHSZ; i++) {
+ blkif = blkif_hash[i];
+ while (blkif != NULL) {
+ if (blkif->status == DISCONNECTED) {
+ WPRINTK("(%2d,%2d) DISCONNECTED\n",
+ blkif->domid, blkif->handle);
+ } else if (blkif->status == DISCONNECTING) {
+ WPRINTK("(%2d,%2d) DISCONNECTING\n",
+ blkif->domid, blkif->handle);
+ } else if (blkif->blk_ring.sring == NULL) {
+ WPRINTK("(%2d,%2d) CONNECTED, but null sring!\n",
+ blkif->domid, blkif->handle);
+ } else {
+ blkif_get(blkif);
+            WPRINTK("(%2d,%2d): req_cons: %2d, rsp_prod_pvt: %2d "
+ "| req_prod: %2d, rsp_prod: %2d\n",
+ blkif->domid, blkif->handle,
+ blkif->blk_ring.req_cons,
+ blkif->blk_ring.rsp_prod_pvt,
+ blkif->blk_ring.sring->req_prod,
+ blkif->blk_ring.sring->rsp_prod);
+ blkif_put(blkif);
+ }
+ blkif = blkif->hash_next;
+ }
+ }
+}
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_datapath.c b/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_datapath.c
new file mode 100644
index 0000000000..7eebcf120f
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_datapath.c
@@ -0,0 +1,453 @@
+/******************************************************************************
+ * blktap_datapath.c
+ *
+ * XenLinux virtual block-device tap.
+ * Block request routing data path.
+ *
+ * Copyright (c) 2004, Andrew Warfield
+ * -- see full header in blktap.c
+ */
+
+#include "blktap.h"
+#include <asm-xen/evtchn.h>
+
+/*-----[ The data paths ]-------------------------------------------------*/
+
+/* Connection to a single backend domain. */
+blkif_front_ring_t blktap_be_ring;
+
+/*-----[ Tracking active requests ]---------------------------------------*/
+
+/* this must be the same as MAX_PENDING_REQS in blkback.c */
+#define MAX_ACTIVE_REQS ((ACTIVE_RING_IDX)64U)
+
+active_req_t active_reqs[MAX_ACTIVE_REQS];
+ACTIVE_RING_IDX active_req_ring[MAX_ACTIVE_REQS];
+spinlock_t active_req_lock = SPIN_LOCK_UNLOCKED;
+ACTIVE_RING_IDX active_prod, active_cons;
+#define MASK_ACTIVE_IDX(_i) ((_i)&(MAX_ACTIVE_REQS-1))
+#define ACTIVE_IDX(_ar) (_ar - active_reqs)
+#define NR_ACTIVE_REQS (MAX_ACTIVE_REQS - active_prod + active_cons)
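+
+/*
+ * active_req_ring is a ring of free slot indices: it starts full
+ * (active_prod - active_cons == MAX_ACTIVE_REQS), get_active_req()
+ * consumes an index and free_active_req() returns one, so NR_ACTIVE_REQS
+ * counts the requests currently in flight.
+ */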
+
+inline active_req_t *get_active_req(void)
+{
+ ACTIVE_RING_IDX idx;
+ active_req_t *ar;
+ unsigned long flags;
+
+ ASSERT(active_cons != active_prod);
+
+ spin_lock_irqsave(&active_req_lock, flags);
+ idx = active_req_ring[MASK_ACTIVE_IDX(active_cons++)];
+ ar = &active_reqs[idx];
+ spin_unlock_irqrestore(&active_req_lock, flags);
+
+ return ar;
+}
+
+inline void free_active_req(active_req_t *ar)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&active_req_lock, flags);
+ active_req_ring[MASK_ACTIVE_IDX(active_prod++)] = ACTIVE_IDX(ar);
+ spin_unlock_irqrestore(&active_req_lock, flags);
+}
+
+active_req_t *lookup_active_req(ACTIVE_RING_IDX idx)
+{
+ return &active_reqs[idx];
+}
+
+void active_reqs_init(void)
+{
+ ACTIVE_RING_IDX i;
+
+ active_cons = 0;
+ active_prod = MAX_ACTIVE_REQS;
+ memset(active_reqs, 0, sizeof(active_reqs));
+ for ( i = 0; i < MAX_ACTIVE_REQS; i++ )
+ active_req_ring[i] = i;
+}
+
+/* Requests passing through the tap to the backend hijack the id field
+ * in the request message. In it we put the AR index _AND_ the fe domid.
+ * The domid is used by the backend to map the pages properly.
+ */
+
+static inline unsigned long MAKE_ID(domid_t fe_dom, ACTIVE_RING_IDX idx)
+{
+ return ( (fe_dom << 16) | MASK_ACTIVE_IDX(idx) );
+}
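+
+/*
+ * The inverse macros used below, ID_TO_IDX() and ID_TO_DOM(), live in
+ * blktap.h; given the packing above they are presumably along the lines
+ * of:
+ *
+ *   #define ID_TO_IDX(_id) ((_id) & 0xffff)
+ *   #define ID_TO_DOM(_id) ((_id) >> 16)
+ */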
+
+/*-----[ Ring helpers ]---------------------------------------------------*/
+
+inline int write_resp_to_fe_ring(blkif_t *blkif, blkif_response_t *rsp)
+{
+ blkif_response_t *resp_d;
+ active_req_t *ar;
+
+ ar = &active_reqs[ID_TO_IDX(rsp->id)];
+ rsp->id = ar->id;
+
+ resp_d = RING_GET_RESPONSE(&blkif->blk_ring,
+ blkif->blk_ring.rsp_prod_pvt);
+ memcpy(resp_d, rsp, sizeof(blkif_response_t));
+ wmb();
+ blkif->blk_ring.rsp_prod_pvt++;
+
+ blkif_put(ar->blkif);
+ free_active_req(ar);
+
+ return 0;
+}
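+
+/*
+ * Note that this undoes the id remapping performed in do_block_io_op()
+ * below: the tap-local index locates the active request record, and the
+ * frontend's original id (saved in ar->id) is restored before the
+ * response is copied onto the frontend's ring.
+ */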
+
+inline int write_req_to_be_ring(blkif_request_t *req)
+{
+ blkif_request_t *req_d;
+
+ if ( blktap_be_state != BLKIF_STATE_CONNECTED ) {
+ WPRINTK("Tap trying to access an unconnected backend!\n");
+ return 0;
+ }
+
+ req_d = RING_GET_REQUEST(&blktap_be_ring,
+ blktap_be_ring.req_prod_pvt);
+ memcpy(req_d, req, sizeof(blkif_request_t));
+ wmb();
+ blktap_be_ring.req_prod_pvt++;
+
+ return 0;
+}
+
+void kick_fe_domain(blkif_t *blkif)
+{
+ RING_PUSH_RESPONSES(&blkif->blk_ring);
+ notify_via_evtchn(blkif->evtchn);
+ DPRINTK("notified FE(dom %u)\n", blkif->domid);
+
+}
+
+void kick_be_domain(void)
+{
+ if ( blktap_be_state != BLKIF_STATE_CONNECTED )
+ return;
+
+ wmb(); /* Ensure that the frontend can see the requests. */
+ RING_PUSH_REQUESTS(&blktap_be_ring);
+ notify_via_evtchn(blktap_be_evtchn);
+ DPRINTK("notified BE\n");
+}
+
+/*-----[ Data to/from Frontend (client) VMs ]-----------------------------*/
+
+/*-----[ Scheduler list maint -from blkback ]--- */
+
+static struct list_head blkio_schedule_list;
+static spinlock_t blkio_schedule_list_lock;
+
+static int __on_blkdev_list(blkif_t *blkif)
+{
+ return blkif->blkdev_list.next != NULL;
+}
+
+static void remove_from_blkdev_list(blkif_t *blkif)
+{
+ unsigned long flags;
+ if ( !__on_blkdev_list(blkif) ) return;
+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+ if ( __on_blkdev_list(blkif) )
+ {
+ list_del(&blkif->blkdev_list);
+ blkif->blkdev_list.next = NULL;
+ blkif_put(blkif);
+ }
+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+}
+
+static void add_to_blkdev_list_tail(blkif_t *blkif)
+{
+ unsigned long flags;
+ if ( __on_blkdev_list(blkif) ) return;
+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+ if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
+ {
+ list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
+ blkif_get(blkif);
+ }
+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+}
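+
+/*
+ * Both helpers above use an unlocked __on_blkdev_list() check as a fast
+ * path and then re-test under blkio_schedule_list_lock, so racing
+ * callers cannot double-add or double-remove an interface.
+ */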
+
+/*-----[ Scheduler functions - from blkback ]--- */
+
+static DECLARE_WAIT_QUEUE_HEAD(blkio_schedule_wait);
+
+static int do_block_io_op(blkif_t *blkif, int max_to_do);
+
+static int blkio_schedule(void *arg)
+{
+ DECLARE_WAITQUEUE(wq, current);
+
+ blkif_t *blkif;
+ struct list_head *ent;
+
+    daemonize("xentapd");
+
+ for ( ; ; )
+ {
+ /* Wait for work to do. */
+ add_wait_queue(&blkio_schedule_wait, &wq);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if ( (NR_ACTIVE_REQS == MAX_ACTIVE_REQS) ||
+ list_empty(&blkio_schedule_list) )
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&blkio_schedule_wait, &wq);
+
+ /* Queue up a batch of requests. */
+ while ( (NR_ACTIVE_REQS < MAX_ACTIVE_REQS) &&
+ !list_empty(&blkio_schedule_list) )
+ {
+ ent = blkio_schedule_list.next;
+ blkif = list_entry(ent, blkif_t, blkdev_list);
+ blkif_get(blkif);
+ remove_from_blkdev_list(blkif);
+ if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
+ add_to_blkdev_list_tail(blkif);
+ blkif_put(blkif);
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ /* Push the batch through to disc. */
+ run_task_queue(&tq_disk);
+#endif
+ }
+}
+
+static void maybe_trigger_blkio_schedule(void)
+{
+ /*
+     * Needed so that two processes, which together make the following predicate
+ * true, don't both read stale values and evaluate the predicate
+ * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
+ */
+ smp_mb();
+
+ if ( (NR_ACTIVE_REQS < (MAX_ACTIVE_REQS)) && /* XXX!!! was M_A_R/2*/
+ !list_empty(&blkio_schedule_list) )
+ wake_up(&blkio_schedule_wait);
+}
+
+void blkif_deschedule(blkif_t *blkif)
+{
+ remove_from_blkdev_list(blkif);
+}
+
+void __init blkdev_schedule_init(void)
+{
+ spin_lock_init(&blkio_schedule_list_lock);
+ INIT_LIST_HEAD(&blkio_schedule_list);
+
+ if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
+ BUG();
+}
+
+/*-----[ Interrupt entry from a frontend ]------ */
+
+irqreturn_t blkif_ptfe_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+ blkif_t *blkif = dev_id;
+
+ add_to_blkdev_list_tail(blkif);
+ maybe_trigger_blkio_schedule();
+ return IRQ_HANDLED;
+}
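+
+/*
+ * The frontend interrupt handler does no ring work itself: it queues the
+ * interface and wakes the xentapd thread, which drains the ring via
+ * do_block_io_op() below.
+ */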
+
+/*-----[ Other Frontend Ring functions ]-------- */
+
+static int do_block_io_op(blkif_t *blkif, int max_to_do)
+{
+ /* we have pending messages from the real frontend. */
+
+ blkif_request_t *req_s;
+ RING_IDX i, rp;
+ unsigned long flags;
+ active_req_t *ar;
+ int more_to_do = 0;
+ int notify_be = 0, notify_user = 0;
+
+ DPRINTK("PT got FE interrupt.\n");
+
+ if (NR_ACTIVE_REQS == MAX_ACTIVE_REQS) return 1;
+
+ /* lock both rings */
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ rp = blkif->blk_ring.sring->req_prod;
+ rmb();
+
+ for ( i = blkif->blk_ring.req_cons;
+ (i != rp) &&
+ !RING_REQUEST_CONS_OVERFLOW(&blkif->blk_ring, i);
+ i++ )
+ {
+
+ if ((--max_to_do == 0) || (NR_ACTIVE_REQS == MAX_ACTIVE_REQS))
+ {
+ more_to_do = 1;
+ break;
+ }
+
+ req_s = RING_GET_REQUEST(&blkif->blk_ring, i);
+ /* This is a new request:
+ * Assign an active request record, and remap the id.
+ */
+ ar = get_active_req();
+ ar->id = req_s->id;
+ ar->nr_pages = req_s->nr_segments;
+ blkif_get(blkif);
+ ar->blkif = blkif;
+ req_s->id = MAKE_ID(blkif->domid, ACTIVE_IDX(ar));
+ /* WPRINTK("%3u < %3lu\n", ID_TO_IDX(req_s->id), ar->id); */
+
+ /* FE -> BE interposition point is here. */
+
+ /* ------------------------------------------------------------- */
+ /* BLKIF_OP_PROBE_HACK: */
+ /* Signal to the backend that we are a tap domain. */
+
+ if (req_s->operation == BLKIF_OP_PROBE) {
+ DPRINTK("Adding BLKTAP_COOKIE to PROBE request.\n");
+ req_s->frame_and_sects[1] = BLKTAP_COOKIE;
+ }
+
+ /* ------------------------------------------------------------- */
+
+ /* If we are in MODE_INTERCEPT_FE or MODE_COPY_FE: */
+ if ( (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) ||
+ (blktap_mode & BLKTAP_MODE_COPY_FE) ) {
+
+ /* Copy the response message to UFERing */
+ /* In MODE_INTERCEPT_FE, map attached pages into the app vma */
+ /* In MODE_COPY_FE_PAGES, copy attached pages into the app vma */
+
+ DPRINTK("req->UFERing\n");
+ blktap_write_fe_ring(req_s);
+ notify_user = 1;
+ }
+
+ /* If we are not in MODE_INTERCEPT_FE or MODE_INTERCEPT_BE: */
+ if ( !((blktap_mode & BLKTAP_MODE_INTERCEPT_FE) ||
+ (blktap_mode & BLKTAP_MODE_INTERCEPT_BE)) ) {
+
+            /* (fe check included to prevent noise from the fe when it's off) */
+ /* copy the request message to the BERing */
+
+ DPRINTK("blktap: FERing[%u] -> BERing[%u]\n",
+ (unsigned)i & (RING_SIZE(&blktap_be_ring)-1),
+ (unsigned)blktap_be_ring.req_prod_pvt &
+                    (RING_SIZE(&blktap_be_ring)-1));
+
+ write_req_to_be_ring(req_s);
+ notify_be = 1;
+ }
+ }
+
+ blkif->blk_ring.req_cons = i;
+
+ /* unlock rings */
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ if (notify_user)
+ blktap_kick_user();
+ if (notify_be)
+ kick_be_domain();
+
+ return more_to_do;
+}
+
+/*-----[ Data to/from Backend (server) VM ]------------------------------*/
+
+irqreturn_t blkif_ptbe_int(int irq, void *dev_id,
+ struct pt_regs *ptregs)
+{
+ blkif_response_t *resp_s;
+ blkif_t *blkif;
+ RING_IDX rp, i;
+ unsigned long flags;
+
+ DPRINTK("PT got BE interrupt.\n");
+
+ /* lock both rings */
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ rp = blktap_be_ring.sring->rsp_prod;
+ rmb();
+
+ for ( i = blktap_be_ring.rsp_cons; i != rp; i++)
+ {
+ resp_s = RING_GET_RESPONSE(&blktap_be_ring, i);
+
+ /* BE -> FE interposition point is here. */
+
+ blkif = active_reqs[ID_TO_IDX(resp_s->id)].blkif;
+
+ /* If we are in MODE_INTERCEPT_BE or MODE_COPY_BE: */
+ if ( (blktap_mode & BLKTAP_MODE_INTERCEPT_BE) ||
+ (blktap_mode & BLKTAP_MODE_COPY_BE) ) {
+
+ /* Copy the response message to UBERing */
+ /* In MODE_INTERCEPT_BE, map attached pages into the app vma */
+ /* In MODE_COPY_BE_PAGES, copy attached pages into the app vma */
+
+ DPRINTK("rsp->UBERing\n");
+ blktap_write_be_ring(resp_s);
+ blktap_kick_user();
+
+ }
+
+ /* If we are NOT in MODE_INTERCEPT_BE or MODE_INTERCEPT_FE: */
+ if ( !((blktap_mode & BLKTAP_MODE_INTERCEPT_BE) ||
+ (blktap_mode & BLKTAP_MODE_INTERCEPT_FE)) ) {
+
+ /* (fe included to prevent random interference from the BE) */
+ /* Copy the response message to FERing */
+
+ DPRINTK("blktap: BERing[%u] -> FERing[%u]\n",
+ (unsigned)i & (RING_SIZE(&blkif->blk_ring)-1),
+ (unsigned)blkif->blk_ring.rsp_prod_pvt &
+                    (RING_SIZE(&blkif->blk_ring)-1));
+
+ write_resp_to_fe_ring(blkif, resp_s);
+ kick_fe_domain(blkif);
+
+ }
+ }
+
+ blktap_be_ring.rsp_cons = i;
+
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/* Debug: print the current ring indices. */
+
+void print_be_ring_idxs(void)
+{
+ if (blktap_be_ring.sring != NULL) {
+ WPRINTK("BE Ring: \n--------\n");
+        WPRINTK("BE: rsp_cons: %2d, req_prod_pvt: %2d "
+ "| req_prod: %2d, rsp_prod: %2d\n",
+ blktap_be_ring.rsp_cons,
+ blktap_be_ring.req_prod_pvt,
+ blktap_be_ring.sring->req_prod,
+ blktap_be_ring.sring->rsp_prod);
+ }
+}
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_userdev.c b/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_userdev.c
new file mode 100644
index 0000000000..78a487662e
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blktap/blktap_userdev.c
@@ -0,0 +1,471 @@
+/******************************************************************************
+ * blktap_userdev.c
+ *
+ * XenLinux virtual block-device tap.
+ * Control interface between the driver and a character device.
+ *
+ * Copyright (c) 2004, Andrew Warfield
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/errno.h>
+#include <linux/major.h>
+#include <linux/gfp.h>
+#include <linux/poll.h>
+#include <asm/pgalloc.h>
+#include <asm-xen/xen-public/io/blkif.h> /* for control ring. */
+
+#include "blktap.h"
+
+
+unsigned long blktap_mode = BLKTAP_MODE_PASSTHROUGH;
+
+/* Only one process may open /dev/xen/blktap at any time. */
+static unsigned long blktap_dev_inuse;
+unsigned long blktap_ring_ok; /* make this ring->state */
+
+/* for poll: */
+static wait_queue_head_t blktap_wait;
+
+/* Where things are inside the device mapping. */
+struct vm_area_struct *blktap_vma;
+unsigned long mmap_vstart;
+unsigned long rings_vstart;
+
+/* Rings up to user space. */
+static blkif_front_ring_t blktap_ufe_ring;
+static blkif_back_ring_t blktap_ube_ring;
+static ctrl_front_ring_t blktap_uctrl_ring;
+
+/* local prototypes */
+static int blktap_read_fe_ring(void);
+static int blktap_read_be_ring(void);
+
+/* -------[ blktap vm ops ]------------------------------------------- */
+
+static struct page *blktap_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int *type)
+{
+    /*
+     * If the page has not been mapped in by the driver then generate
+     * a SIGBUS to the faulting process.
+     */
+
+ force_sig(SIGBUS, current);
+
+ return 0;
+}
+
+struct vm_operations_struct blktap_vm_ops = {
+ nopage: blktap_nopage,
+};
+
+/* -------[ blktap file ops ]----------------------------------------- */
+
+static int blktap_open(struct inode *inode, struct file *filp)
+{
+ blkif_sring_t *sring;
+ ctrl_sring_t *csring;
+
+ if ( test_and_set_bit(0, &blktap_dev_inuse) )
+ return -EBUSY;
+
+ printk(KERN_ALERT "blktap open.\n");
+
+ /* Allocate the ctrl ring. */
+ csring = (ctrl_sring_t *)get_zeroed_page(GFP_KERNEL);
+ if (csring == NULL)
+ goto fail_nomem;
+
+ SetPageReserved(virt_to_page(csring));
+
+ SHARED_RING_INIT(csring);
+ FRONT_RING_INIT(&blktap_uctrl_ring, csring, PAGE_SIZE);
+
+ /* Allocate the fe ring. */
+ sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
+ if (sring == NULL)
+ goto fail_free_ctrl;
+
+ SetPageReserved(virt_to_page(sring));
+
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
+
+ /* Allocate the be ring. */
+ sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
+ if (sring == NULL)
+ goto fail_free_fe;
+
+ SetPageReserved(virt_to_page(sring));
+
+ SHARED_RING_INIT(sring);
+ BACK_RING_INIT(&blktap_ube_ring, sring, PAGE_SIZE);
+
+ DPRINTK(KERN_ALERT "blktap open.\n");
+
+ return 0;
+
+ fail_free_ctrl:
+ free_page( (unsigned long) blktap_uctrl_ring.sring);
+
+ fail_free_fe:
+ free_page( (unsigned long) blktap_ufe_ring.sring);
+
+ fail_nomem:
+ return -ENOMEM;
+}
+
+static int blktap_release(struct inode *inode, struct file *filp)
+{
+ blktap_dev_inuse = 0;
+ blktap_ring_ok = 0;
+
+ printk(KERN_ALERT "blktap closed.\n");
+
+ /* Free the ring page. */
+ ClearPageReserved(virt_to_page(blktap_uctrl_ring.sring));
+ free_page((unsigned long) blktap_uctrl_ring.sring);
+
+ ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
+ free_page((unsigned long) blktap_ufe_ring.sring);
+
+ ClearPageReserved(virt_to_page(blktap_ube_ring.sring));
+ free_page((unsigned long) blktap_ube_ring.sring);
+
+ return 0;
+}
+
+/* Note on mmap:
+ * remap_pfn_range sets VM_IO on vma->vm_flags. While trying to make libaio
+ * do direct page access from userspace, this turned out to be a problem.
+ * The bigger issue seems to be that there is no way to map a foreign page
+ * into user space and have the virtual address of that page map sanely
+ * down to an mfn.
+ * Removing the VM_IO flag results in a loop in get_user_pages, as
+ * pfn_valid() always fails on a foreign page.
+ */
+static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int size;
+
+ printk(KERN_ALERT "blktap mmap (%lx, %lx)\n",
+ vma->vm_start, vma->vm_end);
+
+ vma->vm_ops = &blktap_vm_ops;
+
+ size = vma->vm_end - vma->vm_start;
+ if ( size != ( (MMAP_PAGES + RING_PAGES) << PAGE_SHIFT ) ) {
+ printk(KERN_INFO
+ "blktap: you _must_ map exactly %d pages!\n",
+ MMAP_PAGES + RING_PAGES);
+ return -EAGAIN;
+ }
+
+ size >>= PAGE_SHIFT;
+ printk(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
+
+ rings_vstart = vma->vm_start;
+ mmap_vstart = rings_vstart + (RING_PAGES << PAGE_SHIFT);
+
+ /* Map the ring pages to the start of the region and reserve it. */
+
+ /* not sure if I really need to do this... */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ DPRINTK("Mapping ctrl_ring page %lx.\n", __pa(blktap_uctrl_ring.sring));
+ if (remap_pfn_range(vma, vma->vm_start,
+ __pa(blktap_uctrl_ring.sring) >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ WPRINTK("ctrl_ring: remap_pfn_range failure!\n");
+ }
+
+ DPRINTK("Mapping be_ring page %lx.\n", __pa(blktap_ube_ring.sring));
+ if (remap_pfn_range(vma, vma->vm_start + PAGE_SIZE,
+ __pa(blktap_ube_ring.sring) >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ WPRINTK("be_ring: remap_pfn_range failure!\n");
+ }
+
+ DPRINTK("Mapping fe_ring page %lx.\n", __pa(blktap_ufe_ring.sring));
+ if (remap_pfn_range(vma, vma->vm_start + ( 2 * PAGE_SIZE ),
+ __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ WPRINTK("fe_ring: remap_pfn_range failure!\n");
+ }
+
+ blktap_vma = vma;
+ blktap_ring_ok = 1;
+
+ return 0;
+}
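+
+/*
+ * Resulting layout of the mapping (assuming RING_PAGES is 3): page 0 is
+ * the ctrl ring, page 1 the be ring, page 2 the fe ring, followed by
+ * MMAP_PAGES data pages starting at mmap_vstart.
+ */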
+
+static int blktap_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ switch(cmd) {
+ case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
+ return blktap_read_fe_ring();
+
+ case BLKTAP_IOCTL_KICK_BE: /* There are be messages to process. */
+ return blktap_read_be_ring();
+
+    case BLKTAP_IOCTL_SETMODE:
+        if (BLKTAP_MODE_VALID(arg)) {
+            blktap_mode = arg;
+            /* XXX: may need to flush rings here. */
+            printk(KERN_INFO "blktap: set mode to %lx\n", arg);
+            return 0;
+        }
+        return -EINVAL; /* don't fall through to PRINT_IDXS on a bad mode */
+ case BLKTAP_IOCTL_PRINT_IDXS:
+ {
+ print_be_ring_idxs();
+ print_fe_ring_idxs();
+ WPRINTK("User Rings: \n-----------\n");
+        WPRINTK("UF: rsp_cons: %2d, req_prod_pvt: %2d "
+ "| req_prod: %2d, rsp_prod: %2d\n",
+ blktap_ufe_ring.rsp_cons,
+ blktap_ufe_ring.req_prod_pvt,
+ blktap_ufe_ring.sring->req_prod,
+ blktap_ufe_ring.sring->rsp_prod);
+        WPRINTK("UB: req_cons: %2d, rsp_prod_pvt: %2d "
+ "| req_prod: %2d, rsp_prod: %2d\n",
+ blktap_ube_ring.req_cons,
+ blktap_ube_ring.rsp_prod_pvt,
+ blktap_ube_ring.sring->req_prod,
+ blktap_ube_ring.sring->rsp_prod);
+
+ }
+ }
+ return -ENOIOCTLCMD;
+}
+
+static unsigned int blktap_poll(struct file *file, poll_table *wait)
+{
+ poll_wait(file, &blktap_wait, wait);
+
+ if ( RING_HAS_UNPUSHED_REQUESTS(&blktap_uctrl_ring) ||
+ RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring) ||
+ RING_HAS_UNPUSHED_RESPONSES(&blktap_ube_ring) ) {
+
+ RING_PUSH_REQUESTS(&blktap_uctrl_ring);
+ RING_PUSH_REQUESTS(&blktap_ufe_ring);
+ RING_PUSH_RESPONSES(&blktap_ube_ring);
+ return POLLIN | POLLRDNORM;
+ }
+
+ return 0;
+}
+
+void blktap_kick_user(void)
+{
+ /* blktap_ring->req_prod = blktap_req_prod; */
+ wake_up_interruptible(&blktap_wait);
+}
+
+static struct file_operations blktap_fops = {
+ owner: THIS_MODULE,
+ poll: blktap_poll,
+ ioctl: blktap_ioctl,
+ open: blktap_open,
+ release: blktap_release,
+ mmap: blktap_mmap,
+};
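+
+/*
+ * A minimal user-space sketch of this interface (illustrative only; it
+ * assumes the BLKTAP_IOCTL_* and page-count constants from blktap.h and
+ * a /dev/xen/blktap device node):
+ *
+ *   int fd = open("/dev/xen/blktap", O_RDWR);
+ *   char *shared = mmap(NULL, (MMAP_PAGES + RING_PAGES) * getpagesize(),
+ *                       PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *   while (poll(&pfd, 1, -1) > 0) {
+ *       ... process requests/responses on the rings mapped at 'shared',
+ *       then ioctl(fd, BLKTAP_IOCTL_KICK_FE, 0) and/or
+ *       ioctl(fd, BLKTAP_IOCTL_KICK_BE, 0) to have the driver consume
+ *       what was written back ...
+ *   }
+ */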
+
+/*-----[ Data to/from user space ]----------------------------------------*/
+
+int blktap_write_fe_ring(blkif_request_t *req)
+{
+ blkif_request_t *target;
+ int error, i;
+
+ /*
+ * This is called to pass a request from the real frontend domain's
+ * blkif ring to the character device.
+ */
+
+ if ( ! blktap_ring_ok ) {
+ DPRINTK("blktap: ufe_ring not ready for a request!\n");
+ return 0;
+ }
+
+ if ( RING_FULL(&blktap_ufe_ring) ) {
+ PRINTK("blktap: fe_ring is full, can't add.\n");
+ return 0;
+ }
+
+ target = RING_GET_REQUEST(&blktap_ufe_ring,
+ blktap_ufe_ring.req_prod_pvt);
+ memcpy(target, req, sizeof(*req));
+
+ /* Attempt to map the foreign pages directly in to the application */
+ for (i=0; i<target->nr_segments; i++) {
+
+ error = direct_remap_area_pages(blktap_vma->vm_mm,
+ MMAP_VADDR(ID_TO_IDX(req->id), i),
+ target->frame_and_sects[i] & PAGE_MASK,
+ PAGE_SIZE,
+ blktap_vma->vm_page_prot,
+ ID_TO_DOM(req->id));
+ if ( error != 0 ) {
+ printk(KERN_INFO "remapping attached page failed! (%d)\n", error);
+ /* the request is now dropped on the floor. */
+ return 0;
+ }
+ }
+
+ blktap_ufe_ring.req_prod_pvt++;
+
+ return 0;
+}
+
+int blktap_write_be_ring(blkif_response_t *rsp)
+{
+ blkif_response_t *target;
+
+ /*
+     * This is called to pass a response from the real backend domain's
+ * blkif ring to the character device.
+ */
+
+ if ( ! blktap_ring_ok ) {
+ DPRINTK("blktap: be_ring not ready for a request!\n");
+ return 0;
+ }
+
+ /* No test for fullness in the response direction. */
+
+ target = RING_GET_RESPONSE(&blktap_ube_ring,
+ blktap_ube_ring.rsp_prod_pvt);
+ memcpy(target, rsp, sizeof(*rsp));
+
+ /* no mapping -- pages were mapped in blktap_write_fe_ring() */
+
+ blktap_ube_ring.rsp_prod_pvt++;
+
+ return 0;
+}
+
+static int blktap_read_fe_ring(void)
+{
+ /* This is called to read responses from the UFE ring. */
+
+ RING_IDX i, rp;
+ blkif_response_t *resp_s;
+ blkif_t *blkif;
+ active_req_t *ar;
+
+ DPRINTK("blktap_read_fe_ring()\n");
+
+    /* if we are forwarding from UFERing to FERing */
+ if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
+
+ /* for each outstanding message on the UFEring */
+ rp = blktap_ufe_ring.sring->rsp_prod;
+ rmb();
+
+ for ( i = blktap_ufe_ring.rsp_cons; i != rp; i++ )
+ {
+ resp_s = RING_GET_RESPONSE(&blktap_ufe_ring, i);
+
+ DPRINTK("resp->fe_ring\n");
+ ar = lookup_active_req(ID_TO_IDX(resp_s->id));
+ blkif = ar->blkif;
+ zap_page_range(blktap_vma, MMAP_VADDR(ID_TO_IDX(resp_s->id), 0),
+ ar->nr_pages << PAGE_SHIFT, NULL);
+ write_resp_to_fe_ring(blkif, resp_s);
+ blktap_ufe_ring.rsp_cons = i + 1;
+ kick_fe_domain(blkif);
+ }
+ }
+ return 0;
+}
+
+static int blktap_read_be_ring(void)
+{
+ /* This is called to read requests from the UBE ring. */
+
+ RING_IDX i, rp;
+ blkif_request_t *req_s;
+
+ DPRINTK("blktap_read_be_ring()\n");
+
+    /* if we are forwarding from UBERing to BERing */
+    if (blktap_mode & BLKTAP_MODE_INTERCEPT_BE) {
+
+        /* for each outstanding message on the UBERing */
+ rp = blktap_ube_ring.sring->req_prod;
+ rmb();
+ for ( i = blktap_ube_ring.req_cons; i != rp; i++ )
+ {
+ req_s = RING_GET_REQUEST(&blktap_ube_ring, i);
+
+ DPRINTK("req->be_ring\n");
+ write_req_to_be_ring(req_s);
+ kick_be_domain();
+ }
+
+ blktap_ube_ring.req_cons = i;
+ }
+
+ return 0;
+}
+
+int blktap_write_ctrl_ring(ctrl_msg_t *msg)
+{
+ ctrl_msg_t *target;
+
+ if ( ! blktap_ring_ok ) {
+ DPRINTK("blktap: be_ring not ready for a request!\n");
+ return 0;
+ }
+
+ /* No test for fullness in the response direction. */
+
+ target = RING_GET_REQUEST(&blktap_uctrl_ring,
+ blktap_uctrl_ring.req_prod_pvt);
+ memcpy(target, msg, sizeof(*msg));
+
+ blktap_uctrl_ring.req_prod_pvt++;
+
+ /* currently treat the ring as unidirectional. */
+ blktap_uctrl_ring.rsp_cons = blktap_uctrl_ring.sring->rsp_prod;
+
+ return 0;
+
+}
+
+/* -------[ blktap module setup ]------------------------------------- */
+
+static struct miscdevice blktap_miscdev = {
+ .minor = BLKTAP_MINOR,
+ .name = "blktap",
+ .fops = &blktap_fops,
+ .devfs_name = "misc/blktap",
+};
+
+int blktap_init(void)
+{
+ int err;
+
+ err = misc_register(&blktap_miscdev);
+ if ( err != 0 )
+ {
+ printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n", err);
+ return err;
+ }
+
+ init_waitqueue_head(&blktap_wait);
+
+ return 0;
+}
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/console/console.c b/linux-2.6.11-xen-sparse/drivers/xen/console/console.c
index a524688ec3..142ca2baaa 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/console/console.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/console/console.c
@@ -64,15 +64,34 @@
* warnings from standard distro startup scripts.
*/
static enum { XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL } xc_mode = XC_DEFAULT;
+static int xc_num = -1;
static int __init xencons_setup(char *str)
{
- if ( !strcmp(str, "tty") )
- xc_mode = XC_TTY;
- else if ( !strcmp(str, "ttyS") )
+ char *q;
+ int n;
+
+ if ( !strncmp(str, "ttyS", 4) )
xc_mode = XC_SERIAL;
- else if ( !strcmp(str, "off") )
+ else if ( !strncmp(str, "tty", 3) )
+ xc_mode = XC_TTY;
+ else if ( !strncmp(str, "off", 3) )
xc_mode = XC_OFF;
+
+ switch ( xc_mode )
+ {
+ case XC_SERIAL:
+ n = simple_strtol( str+4, &q, 10 );
+ if ( q > (str + 4) ) xc_num = n;
+ break;
+ case XC_TTY:
+ n = simple_strtol( str+3, &q, 10 );
+ if ( q > (str + 3) ) xc_num = n;
+ break;
+ default:
+ break;
+ }
+
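+    /*
+     * Examples: "xencons=ttyS1" selects XC_SERIAL with xc_num == 1, while
+     * plain "xencons=tty" leaves xc_num at -1 to be defaulted later in
+     * xen_console_init().
+     */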
return 1;
}
__setup("xencons=", xencons_setup);
@@ -141,16 +160,12 @@ static void kcons_write_dom0(
{
int rc;
- while ( count > 0 )
+ while ( (count > 0) &&
+ ((rc = HYPERVISOR_console_io(
+ CONSOLEIO_write, count, (char *)s)) > 0) )
{
- if ( (rc = HYPERVISOR_console_io(CONSOLEIO_write,
- count, (char *)s)) > 0 )
- {
- count -= rc;
- s += rc;
- }
- else
- break;
+ count -= rc;
+ s += rc;
}
}
@@ -187,8 +202,8 @@ void xen_console_init(void)
xc_mode = XC_SERIAL;
kcons_info.write = kcons_write_dom0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- if ( xc_mode == XC_SERIAL )
- kcons_info.flags |= CON_ENABLED;
+ if ( xc_mode == XC_SERIAL )
+ kcons_info.flags |= CON_ENABLED;
#endif
}
else
@@ -198,17 +213,26 @@ void xen_console_init(void)
kcons_info.write = kcons_write;
}
- if ( xc_mode == XC_OFF )
- return __RETCODE;
-
- if ( xc_mode == XC_SERIAL )
+ switch ( xc_mode )
+ {
+ case XC_SERIAL:
strcpy(kcons_info.name, "ttyS");
- else
+ if ( xc_num == -1 ) xc_num = 0;
+ break;
+
+ case XC_TTY:
strcpy(kcons_info.name, "tty");
+ if ( xc_num == -1 ) xc_num = 1;
+ break;
+
+ default:
+ return __RETCODE;
+ }
wbuf = alloc_bootmem(wbuf_size);
register_console(&kcons_info);
+
return __RETCODE;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
@@ -247,7 +271,7 @@ void xencons_force_flush(void)
* We use dangerous control-interface functions that require a quiescent
* system and no interrupts. Try to ensure this with a global cli().
*/
- local_irq_disable(); /* XXXsmp */
+ local_irq_disable(); /* XXXsmp */
/* Spin until console data is flushed through to the domain controller. */
while ( (wc != wp) && !ctrl_if_transmitter_empty() )
@@ -488,8 +512,10 @@ static inline int __xencons_put_char(int ch)
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static int xencons_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static int xencons_write(
+ struct tty_struct *tty,
+ const unsigned char *buf,
+ int count)
{
int i;
unsigned long flags;
@@ -511,8 +537,11 @@ static int xencons_write(struct tty_struct *tty, const unsigned char *buf,
return i;
}
#else
-static int xencons_write(struct tty_struct *tty, int from_user,
- const u_char *buf, int count)
+static int xencons_write(
+ struct tty_struct *tty,
+ int from_user,
+ const u_char *buf,
+ int count)
{
int i;
unsigned long flags;
@@ -655,7 +684,7 @@ static int xennullcon_dummy(void)
return 0;
}
-#define DUMMY (void *)xennullcon_dummy
+#define DUMMY (void *)xennullcon_dummy
/*
* The console `switch' structure for the dummy console
@@ -718,14 +747,14 @@ static int __init xencons_init(void)
if ( xc_mode == XC_SERIAL )
{
DRV(xencons_driver)->name = "ttyS";
- DRV(xencons_driver)->minor_start = 64;
- DRV(xencons_driver)->name_base = 0;
+ DRV(xencons_driver)->minor_start = 64 + xc_num;
+ DRV(xencons_driver)->name_base = 0 + xc_num;
}
else
{
DRV(xencons_driver)->name = "tty";
- DRV(xencons_driver)->minor_start = 1;
- DRV(xencons_driver)->name_base = 1;
+ DRV(xencons_driver)->minor_start = xc_num;
+ DRV(xencons_driver)->name_base = xc_num;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
@@ -772,8 +801,9 @@ static int __init xencons_init(void)
(void)ctrl_if_register_receiver(CMSG_CONSOLE, xencons_rx, 0);
}
- printk("Xen virtual console successfully installed as %s\n",
- DRV(xencons_driver)->name);
+ printk("Xen virtual console successfully installed as %s%d\n",
+ DRV(xencons_driver)->name,
+ DRV(xencons_driver)->name_base );
return 0;
}
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/evtchn/evtchn.c b/linux-2.6.11-xen-sparse/drivers/xen/evtchn/evtchn.c
index 97d229001d..f5da4283d1 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/evtchn/evtchn.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/evtchn/evtchn.c
@@ -44,6 +44,7 @@
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
+#define XEN_EVTCHN_MASK_OPS
#include <asm-xen/evtchn.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
@@ -60,8 +61,8 @@ static devfs_handle_t xen_dev_dir;
struct per_user_data {
/* Notification ring, accessed via /dev/xen/evtchn. */
-# define RING_SIZE 2048 /* 2048 16-bit entries */
-# define RING_MASK(_i) ((_i)&(RING_SIZE-1))
+# define EVTCHN_RING_SIZE 2048 /* 2048 16-bit entries */
+# define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
u16 *ring;
unsigned int ring_cons, ring_prod, ring_overflow;
@@ -85,9 +86,9 @@ void evtchn_device_upcall(int port)
if ( (u = port_user[port]) != NULL )
{
- if ( (u->ring_prod - u->ring_cons) < RING_SIZE )
+ if ( (u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE )
{
- u->ring[RING_MASK(u->ring_prod)] = (u16)port;
+ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
if ( u->ring_cons == u->ring_prod++ )
{
wake_up_interruptible(&u->evtchn_wait);
@@ -153,10 +154,10 @@ static ssize_t evtchn_read(struct file *file, char *buf,
}
/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
- if ( ((c ^ p) & RING_SIZE) != 0 )
+ if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 )
{
- bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(u16);
- bytes2 = RING_MASK(p) * sizeof(u16);
+ bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(u16);
+ bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
}
else
{
@@ -175,7 +176,7 @@ static ssize_t evtchn_read(struct file *file, char *buf,
bytes2 = count - bytes1;
}
- if ( copy_to_user(buf, &u->ring[RING_MASK(c)], bytes1) ||
+ if ( copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) )
{
rc = -EFAULT;
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/netback/Makefile b/linux-2.6.11-xen-sparse/drivers/xen/netback/Makefile
index 3279442145..5085bf034d 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/Makefile
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netback/Makefile
@@ -1,2 +1,2 @@
-obj-y := netback.o control.o interface.o
+obj-y := netback.o control.o interface.o loopback.o
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/netback/common.h b/linux-2.6.11-xen-sparse/drivers/xen/netback/common.h
index 0831cbe311..dfb750ee36 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/common.h
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netback/common.h
@@ -35,6 +35,8 @@ typedef struct netif_st {
domid_t domid;
unsigned int handle;
+ u8 fe_dev_addr[6];
+
/* Physical parameters of the comms window. */
unsigned long tx_shmem_frame;
unsigned long rx_shmem_frame;
@@ -76,6 +78,7 @@ typedef struct netif_st {
void netif_create(netif_be_create_t *create);
void netif_destroy(netif_be_destroy_t *destroy);
+void netif_creditlimit(netif_be_creditlimit_t *creditlimit);
void netif_connect(netif_be_connect_t *connect);
int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id);
void netif_disconnect_complete(netif_t *netif);
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/netback/control.c b/linux-2.6.11-xen-sparse/drivers/xen/netback/control.c
index 6ca0ba2c75..7ffaa2f781 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/control.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netback/control.c
@@ -13,38 +13,29 @@ static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
switch ( msg->subtype )
{
case CMSG_NETIF_BE_CREATE:
- if ( msg->length != sizeof(netif_be_create_t) )
- goto parse_error;
netif_create((netif_be_create_t *)&msg->msg[0]);
break;
case CMSG_NETIF_BE_DESTROY:
- if ( msg->length != sizeof(netif_be_destroy_t) )
- goto parse_error;
netif_destroy((netif_be_destroy_t *)&msg->msg[0]);
- break;
+ break;
+ case CMSG_NETIF_BE_CREDITLIMIT:
+ netif_creditlimit((netif_be_creditlimit_t *)&msg->msg[0]);
+ break;
case CMSG_NETIF_BE_CONNECT:
- if ( msg->length != sizeof(netif_be_connect_t) )
- goto parse_error;
netif_connect((netif_be_connect_t *)&msg->msg[0]);
- break;
+ break;
case CMSG_NETIF_BE_DISCONNECT:
- if ( msg->length != sizeof(netif_be_disconnect_t) )
- goto parse_error;
if ( !netif_disconnect((netif_be_disconnect_t *)&msg->msg[0],msg->id) )
return; /* Sending the response is deferred until later. */
break;
default:
- goto parse_error;
+ DPRINTK("Parse error while reading message subtype %d, len %d\n",
+ msg->subtype, msg->length);
+ msg->length = 0;
+ break;
}
ctrl_if_send_response(msg);
- return;
-
- parse_error:
- DPRINTK("Parse error while reading message subtype %d, len %d\n",
- msg->subtype, msg->length);
- msg->length = 0;
- ctrl_if_send_response(msg);
}
void netif_ctrlif_init(void)
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c b/linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c
index 1d1a5923c4..f509a1b8dd 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c
@@ -3,7 +3,7 @@
*
* Network-device interface management.
*
- * Copyright (c) 2004, Keir Fraser
+ * Copyright (c) 2004-2005, Keir Fraser
*/
#include "common.h"
@@ -140,7 +140,7 @@ void netif_create(netif_be_create_t *create)
netif->credit_bytes = netif->remaining_credit = ~0UL;
netif->credit_usec = 0UL;
- /*init_ac_timer(&new_vif->credit_timeout);*/
+ init_timer(&netif->credit_timeout);
pnetif = &netif_hash[NETIF_HASH(domid, handle)];
while ( *pnetif != NULL )
@@ -163,13 +163,24 @@ void netif_create(netif_be_create_t *create)
/* Disable queuing. */
dev->tx_queue_len = 0;
- /*
- * Initialise a dummy MAC address. We choose the numerically largest
- * non-broadcast address to prevent the address getting stolen by an
- * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF)
- */
- memset(dev->dev_addr, 0xFF, ETH_ALEN);
- dev->dev_addr[0] &= ~0x01;
+ if ( (create->be_mac[0] == 0) && (create->be_mac[1] == 0) &&
+ (create->be_mac[2] == 0) && (create->be_mac[3] == 0) &&
+ (create->be_mac[4] == 0) && (create->be_mac[5] == 0) )
+ {
+ /*
+ * Initialise a dummy MAC address. We choose the numerically largest
+ * non-broadcast address to prevent the address getting stolen by an
+ * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF)
+ */
+ memset(dev->dev_addr, 0xFF, ETH_ALEN);
+ dev->dev_addr[0] &= ~0x01;
+ }
+ else
+ {
+ memcpy(dev->dev_addr, create->be_mac, ETH_ALEN);
+ }
+
+ memcpy(netif->fe_dev_addr, create->mac, ETH_ALEN);
rtnl_lock();
err = register_netdevice(dev);
@@ -223,6 +234,38 @@ void netif_destroy(netif_be_destroy_t *destroy)
destroy->status = NETIF_BE_STATUS_OKAY;
}
+void netif_creditlimit(netif_be_creditlimit_t *creditlimit)
+{
+ domid_t domid = creditlimit->domid;
+ unsigned int handle = creditlimit->netif_handle;
+ netif_t *netif;
+
+ netif = netif_find_by_handle(domid, handle);
+ if ( unlikely(netif == NULL) )
+ {
+ DPRINTK("netif_creditlimit attempted for non-existent netif"
+ " (%u,%u)\n", creditlimit->domid, creditlimit->netif_handle);
+ creditlimit->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ /* Set the credit limit (reset remaining credit to new limit). */
+ netif->credit_bytes = netif->remaining_credit = creditlimit->credit_bytes;
+ netif->credit_usec = creditlimit->period_usec;
+
+ if ( netif->status == CONNECTED )
+ {
+ /*
+ * Schedule work so that any packets waiting under previous credit
+ * limit are dealt with (acts like a replenishment point).
+ */
+ netif->credit_timeout.expires = jiffies;
+ netif_schedule_work(netif);
+ }
+
+ creditlimit->status = NETIF_BE_STATUS_OKAY;
+}
+
void netif_connect(netif_be_connect_t *connect)
{
domid_t domid = connect->domid;
@@ -234,9 +277,6 @@ void netif_connect(netif_be_connect_t *connect)
pgprot_t prot;
int error;
netif_t *netif;
-#if 0
- struct net_device *eth0_dev;
-#endif
netif = netif_find_by_handle(domid, handle);
if ( unlikely(netif == NULL) )
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c b/linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c
index 6d29cc262f..d08c296a02 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c
@@ -7,11 +7,16 @@
* reference front-end implementation can be found in:
* drivers/xen/netfront/netfront.c
*
- * Copyright (c) 2002-2004, K A Fraser
+ * Copyright (c) 2002-2005, K A Fraser
*/
#include "common.h"
#include <asm-xen/balloon.h>
+#include <asm-xen/evtchn.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#include <linux/delay.h>
+#endif
static void netif_idx_release(u16 pending_idx);
static void netif_page_release(struct page *page);
@@ -33,8 +38,9 @@ static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
static struct timer_list net_timer;
static struct sk_buff_head rx_queue;
-static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
-static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
+static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2+1];
+static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
+static struct mmuext_op rx_mmuext[NETIF_RX_RING_SIZE];
static unsigned char rx_notify[NR_EVENT_CHANNELS];
/* Don't currently gate addition of an interface to the tx scheduling list. */
@@ -190,8 +196,9 @@ static void net_rx_action(unsigned long unused)
netif_t *netif;
s8 status;
u16 size, id, evtchn;
- mmu_update_t *mmu;
multicall_entry_t *mcl;
+ mmu_update_t *mmu;
+ struct mmuext_op *mmuext;
unsigned long vdata, mdata, new_mfn;
struct sk_buff_head rxq;
struct sk_buff *skb;
@@ -202,6 +209,7 @@ static void net_rx_action(unsigned long unused)
mcl = rx_mcl;
mmu = rx_mmu;
+ mmuext = rx_mmuext;
while ( (skb = skb_dequeue(&rx_queue)) != NULL )
{
netif = netdev_priv(skb->dev);
@@ -224,25 +232,26 @@ static void net_rx_action(unsigned long unused)
*/
phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
- mmu[0].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- mmu[0].val = __pa(vdata) >> PAGE_SHIFT;
- mmu[1].ptr = MMU_EXTENDED_COMMAND;
- mmu[1].val = MMUEXT_SET_FOREIGNDOM;
- mmu[1].val |= (unsigned long)netif->domid << 16;
- mmu[2].ptr = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
- mmu[2].val = MMUEXT_REASSIGN_PAGE;
+ mcl->op = __HYPERVISOR_update_va_mapping;
+ mcl->args[0] = vdata;
+ mcl->args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
+ mcl->args[2] = 0;
+ mcl++;
- mcl[0].op = __HYPERVISOR_update_va_mapping;
- mcl[0].args[0] = vdata >> PAGE_SHIFT;
- mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
- mcl[0].args[2] = 0;
- mcl[1].op = __HYPERVISOR_mmu_update;
- mcl[1].args[0] = (unsigned long)mmu;
- mcl[1].args[1] = 3;
- mcl[1].args[2] = 0;
+ mcl->op = __HYPERVISOR_mmuext_op;
+ mcl->args[0] = (unsigned long)mmuext;
+ mcl->args[1] = 1;
+ mcl->args[2] = 0;
+ mcl->args[3] = netif->domid;
+ mcl++;
- mcl += 2;
- mmu += 3;
+ mmuext->cmd = MMUEXT_REASSIGN_PAGE;
+ mmuext->mfn = mdata >> PAGE_SHIFT;
+ mmuext++;
+
+ mmu->ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ mmu->val = __pa(vdata) >> PAGE_SHIFT;
+ mmu++;
__skb_queue_tail(&rxq, skb);
@@ -254,12 +263,19 @@ static void net_rx_action(unsigned long unused)
if ( mcl == rx_mcl )
return;
- mcl[-2].args[2] = UVMF_FLUSH_TLB;
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (unsigned long)rx_mmu;
+ mcl->args[1] = mmu - rx_mmu;
+ mcl->args[2] = 0;
+ mcl->args[3] = DOMID_SELF;
+ mcl++;
+
+ mcl[-3].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
BUG();
mcl = rx_mcl;
- mmu = rx_mmu;
+ mmuext = rx_mmuext;
while ( (skb = __skb_dequeue(&rxq)) != NULL )
{
netif = netdev_priv(skb->dev);
@@ -267,7 +283,7 @@ static void net_rx_action(unsigned long unused)
/* Rederive the machine addresses. */
new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
- mdata = ((mmu[2].ptr & PAGE_MASK) |
+ mdata = ((mmuext[0].mfn << PAGE_SHIFT) |
((unsigned long)skb->data & ~PAGE_MASK));
atomic_set(&(skb_shinfo(skb)->dataref), 1);
@@ -303,7 +319,7 @@ static void net_rx_action(unsigned long unused)
dev_kfree_skb(skb);
mcl += 2;
- mmu += 3;
+ mmuext += 1;
}
while ( notify_nr != 0 )
@@ -379,14 +395,13 @@ void netif_deschedule_work(netif_t *netif)
remove_from_net_schedule_list(netif);
}
-#if 0
+
static void tx_credit_callback(unsigned long data)
{
netif_t *netif = (netif_t *)data;
netif->remaining_credit = netif->credit_bytes;
netif_schedule_work(netif);
}
-#endif
static void net_tx_action(unsigned long unused)
{
@@ -408,13 +423,13 @@ static void net_tx_action(unsigned long unused)
{
pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
mcl[0].op = __HYPERVISOR_update_va_mapping;
- mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
+ mcl[0].args[0] = MMAP_VADDR(pending_idx);
mcl[0].args[1] = 0;
mcl[0].args[2] = 0;
mcl++;
}
- mcl[-1].args[2] = UVMF_FLUSH_TLB;
+ mcl[-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
BUG();
@@ -470,42 +485,52 @@ static void net_tx_action(unsigned long unused)
continue;
}
- netif->tx->req_cons = ++netif->tx_req_cons;
-
- /*
- * 1. Ensure that we see the request when we copy it.
- * 2. Ensure that frontend sees updated req_cons before we check
- * for more work to schedule.
- */
- mb();
-
+ rmb(); /* Ensure that we see the request before we copy it. */
memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
sizeof(txreq));
-#if 0
/* Credit-based scheduling. */
- if ( tx.size > netif->remaining_credit )
+ if ( txreq.size > netif->remaining_credit )
{
- s_time_t now = NOW(), next_credit =
- netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
- if ( next_credit <= now )
+ unsigned long now = jiffies;
+ unsigned long next_credit =
+ netif->credit_timeout.expires +
+ msecs_to_jiffies(netif->credit_usec / 1000);
+
+ /* Timer could already be pending in some rare cases. */
+ if ( timer_pending(&netif->credit_timeout) )
+ break;
+
+ /* Already passed the point at which we can replenish credit? */
+ if ( time_after_eq(now, next_credit) )
{
netif->credit_timeout.expires = now;
netif->remaining_credit = netif->credit_bytes;
}
- else
+
+ /* Still too big to send right now? Then set a timer callback. */
+ if ( txreq.size > netif->remaining_credit )
{
netif->remaining_credit = 0;
netif->credit_timeout.expires = next_credit;
netif->credit_timeout.data = (unsigned long)netif;
netif->credit_timeout.function = tx_credit_callback;
- netif->credit_timeout.cpu = smp_processor_id();
- add_ac_timer(&netif->credit_timeout);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ add_timer_on(&netif->credit_timeout, smp_processor_id());
+#else
+ add_timer(&netif->credit_timeout);
+#endif
break;
}
}
- netif->remaining_credit -= tx.size;
-#endif
+ netif->remaining_credit -= txreq.size;
+
+ /*
+ * Why the barrier? It ensures that the frontend sees updated req_cons
+ * before we check for more work to schedule.
+ */
+ netif->tx->req_cons = ++netif->tx_req_cons;
+ mb();
netif_schedule_work(netif);
@@ -545,7 +570,7 @@ static void net_tx_action(unsigned long unused)
skb_reserve(skb, 16);
mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
- mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
+ mcl[0].args[0] = MMAP_VADDR(pending_idx);
mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
mcl[0].args[2] = 0;
mcl[0].args[3] = netif->domid;
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c b/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
index 55d2670944..97f3609fcd 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
@@ -51,6 +51,7 @@
#include <asm-xen/xen-public/io/netif.h>
#include <asm-xen/balloon.h>
#include <asm/page.h>
+#include <asm/uaccess.h>
#ifndef __GFP_NOWARN
#define __GFP_NOWARN 0
@@ -394,19 +395,13 @@ static void network_alloc_rx_buffers(struct net_device *dev)
= INVALID_P2M_ENTRY;
rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
- rx_mcl[i].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+ rx_mcl[i].args[0] = (unsigned long)skb->head;
rx_mcl[i].args[1] = 0;
rx_mcl[i].args[2] = 0;
}
- /*
- * We may have allocated buffers which have entries outstanding in the page
- * update queue -- make sure we flush those first!
- */
- flush_page_update_queue();
-
/* After all PTEs have been zapped we blow away stale TLB entries. */
- rx_mcl[i-1].args[2] = UVMF_FLUSH_TLB;
+ rx_mcl[i-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
/* Give away a batch of pages. */
rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
@@ -586,7 +581,7 @@ static int netif_poll(struct net_device *dev, int *pbudget)
mmu->val = __pa(skb->head) >> PAGE_SHIFT;
mmu++;
mcl->op = __HYPERVISOR_update_va_mapping;
- mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+ mcl->args[0] = (unsigned long)skb->head;
mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
mcl->args[2] = 0;
mcl++;
@@ -606,6 +601,7 @@ static int netif_poll(struct net_device *dev, int *pbudget)
mcl->args[0] = (unsigned long)rx_mmu;
mcl->args[1] = mmu - rx_mmu;
mcl->args[2] = 0;
+ mcl->args[3] = DOMID_SELF;
mcl++;
(void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
}
@@ -1122,18 +1118,13 @@ static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
switch (msg->subtype) {
case CMSG_NETIF_FE_INTERFACE_STATUS:
- if (msg->length != sizeof(netif_fe_interface_status_t))
- goto error;
netif_interface_status((netif_fe_interface_status_t *) &msg->msg[0]);
break;
case CMSG_NETIF_FE_DRIVER_STATUS:
- if (msg->length != sizeof(netif_fe_driver_status_t))
- goto error;
netif_driver_status((netif_fe_driver_status_t *) &msg->msg[0]);
break;
- error:
default:
msg->length = 0;
break;
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/privcmd/privcmd.c b/linux-2.6.11-xen-sparse/drivers/xen/privcmd/privcmd.c
index 98e7e92ff4..2f9d5fde38 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/privcmd/privcmd.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/privcmd/privcmd.c
@@ -28,6 +28,11 @@
#include <asm-xen/xen-public/dom0_ops.h>
#include <asm-xen/xen_proc.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define pud_t pgd_t
+#define pud_offset(d, va) d
+#endif
+
static struct proc_dir_entry *privcmd_intf;
static int privcmd_ioctl(struct inode *inode, struct file *file,
@@ -44,6 +49,7 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
return -EFAULT;
+#if defined(__i386__)
__asm__ __volatile__ (
"pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
"movl 4(%%eax),%%ebx ;"
@@ -55,7 +61,18 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
TRAP_INSTR "; "
"popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
: "=a" (ret) : "0" (&hypercall) : "memory" );
-
+#elif defined (__x86_64__)
+ __asm__ __volatile__ (
+ "movq %5,%%r10; movq %6,%%r8;" TRAP_INSTR
+ : "=a" (ret)
+ : "a" ((unsigned long)hypercall.op),
+ "D" ((unsigned long)hypercall.arg[0]),
+ "S" ((unsigned long)hypercall.arg[1]),
+ "d" ((unsigned long)hypercall.arg[2]),
+ "g" ((unsigned long)hypercall.arg[3]),
+ "g" ((unsigned long)hypercall.arg[4])
+ : "r11","rcx","r8","r10","memory");
+#endif
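+ /*
+ * Note on the x86_64 path above: Xen's 64-bit hypercall ABI follows the
+ * syscall convention. The hypercall number travels in %rax and the five
+ * arguments in %rdi, %rsi, %rdx, %r10 and %r8 (hence the two explicit
+ * movq instructions for args 3 and 4); %rcx and %r11 are trashed by the
+ * trap instruction, which is why they sit in the clobber list.
+ */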
}
break;
@@ -83,6 +100,8 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
{
int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
+
+
if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
return -EFAULT;
@@ -115,8 +134,7 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
case IOCTL_PRIVCMD_MMAPBATCH:
{
-#define MAX_DIRECTMAP_MMU_QUEUE 130
- mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
+ mmu_update_t u;
privcmd_mmapbatch_t m;
struct vm_area_struct *vma = NULL;
unsigned long *p, addr;
@@ -137,11 +155,6 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
{ ret = -EFAULT; goto batch_err; }
- u[0].ptr = MMU_EXTENDED_COMMAND;
- u[0].val = MMUEXT_SET_FOREIGNDOM;
- u[0].val |= (unsigned long)m.dom << 16;
- v = w = &u[1];
-
p = m.arr;
addr = m.addr;
for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
@@ -149,24 +162,24 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
if ( get_user(mfn, p) )
return -EFAULT;
- v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
+ u.val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
__direct_remap_area_pages(vma->vm_mm,
addr,
PAGE_SIZE,
- v);
-
- if ( unlikely(HYPERVISOR_mmu_update(u, v - u + 1, NULL) < 0) )
- put_user( 0xF0000000 | mfn, p );
+ &u);
- v = w;
+ if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
+ put_user(0xF0000000 | mfn, p);
}
+
ret = 0;
break;
batch_err:
printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n",
- ret, vma, m.addr, m.num, m.arr, vma->vm_start, vma->vm_end);
+ ret, vma, m.addr, m.num, m.arr,
+ vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
break;
}
break;
@@ -174,13 +187,12 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
{
- unsigned long m2p_start_mfn =
- HYPERVISOR_shared_info->arch.mfn_to_pfn_start;
-
- if( put_user( m2p_start_mfn, (unsigned long *) data ) )
- ret = -EFAULT;
- else
- ret = 0;
+ unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
+ pgd_t *pgd = pgd_offset_k(m2pv);
+ pud_t *pud = pud_offset(pgd, m2pv);
+ pmd_t *pmd = pmd_offset(pud, m2pv);
+ unsigned long m2p_start_mfn = (*(unsigned long *)pmd) >> PAGE_SHIFT;
+ ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0;
}
break;
@@ -207,9 +219,6 @@ static struct file_operations privcmd_file_ops = {
static int __init privcmd_init(void)
{
- if ( !(xen_start_info.flags & SIF_PRIVILEGED) )
- return 0;
-
privcmd_intf = create_xen_proc_entry("privcmd", 0400);
if ( privcmd_intf != NULL )
privcmd_intf->proc_fops = &privcmd_file_ops;
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/usbback/common.h b/linux-2.6.11-xen-sparse/drivers/xen/usbback/common.h
new file mode 100644
index 0000000000..bcab2041bc
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/usbback/common.h
@@ -0,0 +1,85 @@
+
+#ifndef __USBIF__BACKEND__COMMON_H__
+#define __USBIF__BACKEND__COMMON_H__
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/pgalloc.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/hypervisor.h>
+
+#include <asm-xen/xen-public/io/usbif.h>
+
+#if 0
+#define ASSERT(_p) \
+ if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+ __LINE__, __FILE__); *(int*)0=0; }
+#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+#else
+#define ASSERT(_p) ((void)0)
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+typedef struct usbif_priv_st usbif_priv_t;
+
+struct usbif_priv_st {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+ /* Physical parameters of the comms window. */
+ unsigned long shmem_frame;
+ unsigned int evtchn;
+ int irq;
+ /* Comms Information */
+ usbif_back_ring_t usb_ring;
+ /* Private fields. */
+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+ /*
+ * DISCONNECT response is deferred until pending requests are ack'ed.
+ * We therefore need to store the id from the original request.
+ */
+ u8 disconnect_rspid;
+ usbif_priv_t *hash_next;
+ struct list_head usbif_list;
+ spinlock_t usb_ring_lock;
+ atomic_t refcnt;
+
+ struct work_struct work;
+};
+
+void usbif_create(usbif_be_create_t *create);
+void usbif_destroy(usbif_be_destroy_t *destroy);
+void usbif_connect(usbif_be_connect_t *connect);
+int usbif_disconnect(usbif_be_disconnect_t *disconnect, u8 rsp_id);
+void usbif_disconnect_complete(usbif_priv_t *up);
+
+void usbif_release_port(usbif_be_release_port_t *msg);
+int usbif_claim_port(usbif_be_claim_port_t *msg);
+void usbif_release_ports(usbif_priv_t *up);
+
+usbif_priv_t *usbif_find(domid_t domid);
+#define usbif_get(_b) (atomic_inc(&(_b)->refcnt))
+#define usbif_put(_b) \
+ do { \
+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+ usbif_disconnect_complete(_b); \
+ } while (0)
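+/*
+ * Illustrative refcount pairing (a sketch, not an extra API): a caller
+ * that holds a usbif pointer across a blocking operation does
+ *
+ * usbif_priv_t *up = usbif_find(domid);
+ * usbif_get(up);
+ * ... use the interface ...
+ * usbif_put(up);
+ *
+ * and the final put on a DISCONNECTING interface is what fires
+ * usbif_disconnect_complete(), so references also gate teardown.
+ */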
+
+
+void usbif_interface_init(void);
+void usbif_ctrlif_init(void);
+
+void usbif_deschedule(usbif_priv_t *up);
+void remove_from_usbif_list(usbif_priv_t *up);
+
+irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+
+#endif /* __USBIF__BACKEND__COMMON_H__ */
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/usbback/control.c b/linux-2.6.11-xen-sparse/drivers/xen/usbback/control.c
new file mode 100644
index 0000000000..b46b16d8bc
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/usbback/control.c
@@ -0,0 +1,61 @@
+/******************************************************************************
+ * arch/xen/drivers/usbif/backend/control.c
+ *
+ * Routines for interfacing with the control plane.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+static void usbif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ DPRINTK("Received usbif backend message, subtype=%d\n", msg->subtype);
+
+ switch ( msg->subtype )
+ {
+ case CMSG_USBIF_BE_CREATE:
+ usbif_create((usbif_be_create_t *)&msg->msg[0]);
+ break;
+ case CMSG_USBIF_BE_DESTROY:
+ usbif_destroy((usbif_be_destroy_t *)&msg->msg[0]);
+ break;
+ case CMSG_USBIF_BE_CONNECT:
+ usbif_connect((usbif_be_connect_t *)&msg->msg[0]);
+ break;
+ case CMSG_USBIF_BE_DISCONNECT:
+ if ( !usbif_disconnect((usbif_be_disconnect_t *)&msg->msg[0],msg->id) )
+ return; /* Sending the response is deferred until later. */
+ break;
+ case CMSG_USBIF_BE_CLAIM_PORT:
+ usbif_claim_port((usbif_be_claim_port_t *)&msg->msg[0]);
+ break;
+ case CMSG_USBIF_BE_RELEASE_PORT:
+ usbif_release_port((usbif_be_release_port_t *)&msg->msg[0]);
+ break;
+ default:
+ DPRINTK("Parse error while reading message subtype %d, len %d\n",
+ msg->subtype, msg->length);
+ msg->length = 0;
+ break;
+ }
+
+ ctrl_if_send_response(msg);
+}
+
+void usbif_ctrlif_init(void)
+{
+ ctrl_msg_t cmsg;
+ usbif_be_driver_status_changed_t st;
+
+ (void)ctrl_if_register_receiver(CMSG_USBIF_BE, usbif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+
+ /* Send a driver-UP notification to the domain controller. */
+ cmsg.type = CMSG_USBIF_BE;
+ cmsg.subtype = CMSG_USBIF_BE_DRIVER_STATUS_CHANGED;
+ cmsg.length = sizeof(usbif_be_driver_status_changed_t);
+ st.status = USBIF_DRIVER_STATUS_UP;
+ memcpy(cmsg.msg, &st, sizeof(st));
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/usbback/interface.c b/linux-2.6.11-xen-sparse/drivers/xen/usbback/interface.c
new file mode 100644
index 0000000000..c1a16e8000
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/usbback/interface.c
@@ -0,0 +1,252 @@
+/******************************************************************************
+ * arch/xen/drivers/usbif/backend/interface.c
+ *
+ * USB device interface management.
+ *
+ * by Mark Williamson, Copyright (c) 2004
+ */
+
+
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/interface.c
+ *
+ * Block-device interface management.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+#define USBIF_HASHSZ 1024
+#define USBIF_HASH(_d) (((int)(_d))&(USBIF_HASHSZ-1))
+
+static kmem_cache_t *usbif_priv_cachep;
+static usbif_priv_t *usbif_priv_hash[USBIF_HASHSZ];
+
+usbif_priv_t *usbif_find(domid_t domid)
+{
+ usbif_priv_t *up = usbif_priv_hash[USBIF_HASH(domid)];
+ while ( (up != NULL ) && ( up->domid != domid ) )
+ up = up->hash_next;
+ return up;
+}
+
+static void __usbif_disconnect_complete(void *arg)
+{
+ usbif_priv_t *usbif = (usbif_priv_t *)arg;
+ ctrl_msg_t cmsg;
+ usbif_be_disconnect_t disc;
+
+ /*
+ * These can't be done in usbif_disconnect() because at that point there
+ * may be outstanding requests at the device whose asynchronous responses
+ * must still be notified to the remote driver.
+ */
+ unbind_evtchn_from_irq(usbif->evtchn);
+ vfree(usbif->usb_ring.sring);
+
+ /* Construct the deferred response message. */
+ cmsg.type = CMSG_USBIF_BE;
+ cmsg.subtype = CMSG_USBIF_BE_DISCONNECT;
+ cmsg.id = usbif->disconnect_rspid;
+ cmsg.length = sizeof(usbif_be_disconnect_t);
+ disc.domid = usbif->domid;
+ disc.status = USBIF_BE_STATUS_OKAY;
+ memcpy(cmsg.msg, &disc, sizeof(disc));
+
+ /*
+ * Make sure message is constructed /before/ status change, because
+ * after the status change the 'usbif' structure could be deallocated at
+ * any time. Also make sure we send the response /after/ status change,
+ * as otherwise a subsequent CONNECT request could spuriously fail if
+ * another CPU doesn't see the status change yet.
+ */
+ mb();
+ if ( usbif->status != DISCONNECTING )
+ BUG();
+ usbif->status = DISCONNECTED;
+ mb();
+
+ /* Send the successful response. */
+ ctrl_if_send_response(&cmsg);
+}
+
+void usbif_disconnect_complete(usbif_priv_t *up)
+{
+ INIT_WORK(&up->work, __usbif_disconnect_complete, (void *)up);
+ schedule_work(&up->work);
+}
+
+void usbif_create(usbif_be_create_t *create)
+{
+ domid_t domid = create->domid;
+ usbif_priv_t **pup, *up;
+
+ if ( (up = kmem_cache_alloc(usbif_priv_cachep, GFP_KERNEL)) == NULL )
+ {
+ DPRINTK("Could not create usbif: out of memory\n");
+ create->status = USBIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ memset(up, 0, sizeof(*up));
+ up->domid = domid;
+ up->status = DISCONNECTED;
+ spin_lock_init(&up->usb_ring_lock);
+ atomic_set(&up->refcnt, 0);
+
+ pup = &usbif_priv_hash[USBIF_HASH(domid)];
+ while ( *pup != NULL )
+ {
+ if ( (*pup)->domid == domid )
+ {
+ create->status = USBIF_BE_STATUS_INTERFACE_EXISTS;
+ kmem_cache_free(usbif_priv_cachep, up);
+ return;
+ }
+ pup = &(*pup)->hash_next;
+ }
+
+ up->hash_next = *pup;
+ *pup = up;
+
+ create->status = USBIF_BE_STATUS_OKAY;
+}
+
+void usbif_destroy(usbif_be_destroy_t *destroy)
+{
+ domid_t domid = destroy->domid;
+ usbif_priv_t **pup, *up;
+
+ pup = &usbif_priv_hash[USBIF_HASH(domid)];
+ while ( (up = *pup) != NULL )
+ {
+ if ( up->domid == domid )
+ {
+ if ( up->status != DISCONNECTED )
+ goto still_connected;
+ goto destroy;
+ }
+ pup = &up->hash_next;
+ }
+
+ destroy->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+
+ still_connected:
+ destroy->status = USBIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+
+ destroy:
+ *pup = up->hash_next;
+ usbif_release_ports(up);
+ kmem_cache_free(usbif_priv_cachep, up);
+ destroy->status = USBIF_BE_STATUS_OKAY;
+}
+
+void usbif_connect(usbif_be_connect_t *connect)
+{
+ domid_t domid = connect->domid;
+ unsigned int evtchn = connect->evtchn;
+ unsigned long shmem_frame = connect->shmem_frame;
+ struct vm_struct *vma;
+ pgprot_t prot;
+ int error;
+ usbif_priv_t *up;
+ usbif_sring_t *sring;
+
+ up = usbif_find(domid);
+ if ( unlikely(up == NULL) )
+ {
+ DPRINTK("usbif_connect attempted for non-existent usbif (%u)\n",
+ connect->domid);
+ connect->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
+ {
+ connect->status = USBIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+ error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
+ shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ if ( error != 0 )
+ {
+ if ( error == -ENOMEM )
+ connect->status = USBIF_BE_STATUS_OUT_OF_MEMORY;
+ else if ( error == -EFAULT )
+ connect->status = USBIF_BE_STATUS_MAPPING_ERROR;
+ else
+ connect->status = USBIF_BE_STATUS_ERROR;
+ vfree(vma->addr);
+ return;
+ }
+
+ if ( up->status != DISCONNECTED )
+ {
+ connect->status = USBIF_BE_STATUS_INTERFACE_CONNECTED;
+ vfree(vma->addr);
+ return;
+ }
+
+ sring = (usbif_sring_t *)vma->addr;
+ SHARED_RING_INIT(sring);
+ BACK_RING_INIT(&up->usb_ring, sring, PAGE_SIZE);
+
+ up->evtchn = evtchn;
+ up->irq = bind_evtchn_to_irq(evtchn);
+ up->shmem_frame = shmem_frame;
+ up->status = CONNECTED;
+ usbif_get(up);
+
+ request_irq(up->irq, usbif_be_int, 0, "usbif-backend", up);
+
+ connect->status = USBIF_BE_STATUS_OKAY;
+}
+
+/* Remove URBs for this interface before destroying it. */
+void usbif_deschedule(usbif_priv_t *up)
+{
+ remove_from_usbif_list(up);
+}
+
+int usbif_disconnect(usbif_be_disconnect_t *disconnect, u8 rsp_id)
+{
+ domid_t domid = disconnect->domid;
+ usbif_priv_t *up;
+
+ up = usbif_find(domid);
+ if ( unlikely(up == NULL) )
+ {
+ DPRINTK("usbif_disconnect attempted for non-existent usbif"
+ " (%u)\n", disconnect->domid);
+ disconnect->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return 1; /* Caller will send response error message. */
+ }
+
+ if ( up->status == CONNECTED )
+ {
+ up->status = DISCONNECTING;
+ up->disconnect_rspid = rsp_id;
+ wmb(); /* Let other CPUs see the status change. */
+ free_irq(up->irq, up);
+ usbif_deschedule(up);
+ usbif_put(up);
+ return 0; /* Caller should not send response message. */
+ }
+
+ disconnect->status = USBIF_BE_STATUS_OKAY;
+ return 1;
+}
+
+void __init usbif_interface_init(void)
+{
+ usbif_priv_cachep = kmem_cache_create("usbif_priv_cache",
+ sizeof(usbif_priv_t),
+ 0, 0, NULL, NULL);
+ memset(usbif_priv_hash, 0, sizeof(usbif_priv_hash));
+}
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/usbback/usbback.c b/linux-2.6.11-xen-sparse/drivers/xen/usbback/usbback.c
new file mode 100644
index 0000000000..42439405cd
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/usbback/usbback.c
@@ -0,0 +1,1070 @@
+/******************************************************************************
+ * arch/xen/drivers/usbif/backend/main.c
+ *
+ * Backend for the Xen virtual USB driver - provides an abstraction of a
+ * USB host controller to the corresponding frontend driver.
+ *
+ * by Mark Williamson
+ * Copyright (c) 2004 Intel Research Cambridge
+ * Copyright (c) 2004, 2005 Mark Williamson
+ *
+ * Based on arch/xen/drivers/blkif/backend/main.c
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ */
+
+#include "common.h"
+
+
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/tqueue.h>
+
+/*
+ * This is rather arbitrary.
+ */
+#define MAX_PENDING_REQS 4
+#define BATCH_PER_DOMAIN 1
+
+static unsigned long mmap_vstart;
+
+/* Needs to be sufficiently large that we can map the (large) buffers
+ * the USB mass storage driver wants. */
+#define MMAP_PAGES_PER_REQUEST \
+ (128)
+#define MMAP_PAGES \
+ (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
+
+#define MMAP_VADDR(_req,_seg) \
+ (mmap_vstart + \
+ ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
+ ((_seg) * PAGE_SIZE))
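+/*
+ * Worked example of the layout above (illustrative): request 0 owns the
+ * window starting at mmap_vstart, request 1 the window 128 pages later,
+ * and so on, so MMAP_VADDR(1, 2) == mmap_vstart + (1*128 + 2)*PAGE_SIZE.
+ * Windows never overlap, which is what lets one request's mappings be
+ * torn down without touching another's.
+ */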
+
+
+static spinlock_t owned_ports_lock;
+LIST_HEAD(owned_ports);
+
+/* A list of these structures is used to track ownership of physical USB
+ * ports. */
+typedef struct
+{
+ usbif_priv_t *usbif_priv;
+ char path[16];
+ int guest_port;
+ int enabled;
+ struct list_head list;
+ unsigned long guest_address; /* The USB device address that has been
+ * assigned by the guest. */
+ int dev_present; /* Is there a device present? */
+ struct usb_device * dev;
+ unsigned long ifaces; /* What interfaces are present on this device? */
+} owned_port_t;
+
+
+/*
+ * Each outstanding request that we've passed to the lower device layers has a
+ * 'pending_req' allocated to it. When the request completes, the specified
+ * domain has a response queued for it, with the saved 'id' passed back.
+ */
+typedef struct {
+ usbif_priv_t *usbif_priv;
+ unsigned long id;
+ int nr_pages;
+ unsigned short operation;
+ int status;
+} pending_req_t;
+
+/*
+ * We can't allocate pending_req's in order, since they may complete out of
+ * order. We therefore maintain an allocation ring. This ring also indicates
+ * when enough work has been passed down -- at that point the allocation ring
+ * will be empty.
+ */
+static pending_req_t pending_reqs[MAX_PENDING_REQS];
+static unsigned char pending_ring[MAX_PENDING_REQS];
+static spinlock_t pend_prod_lock;
+
+/* NB. We use a different index type to differentiate from shared usb rings. */
+typedef unsigned int PEND_RING_IDX;
+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+static PEND_RING_IDX pending_prod, pending_cons;
+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
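+/*
+ * Sketch of the free-slot ring protocol (the real producer/consumer
+ * sites are dispatch_usb_io() and free_pending() below):
+ *
+ * idx = pending_ring[MASK_PEND_IDX(pending_cons)]; pending_cons++;
+ * ... pending_reqs[idx] describes the in-flight request ...
+ * pending_ring[MASK_PEND_IDX(pending_prod++)] = idx; (on completion)
+ *
+ * NR_PENDING_REQS counts slots currently in flight, so the ring is
+ * exhausted exactly when it reaches MAX_PENDING_REQS.
+ */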
+
+static int do_usb_io_op(usbif_priv_t *usbif, int max_to_do);
+static void make_response(usbif_priv_t *usbif, unsigned long id,
+ unsigned short op, int st, int inband,
+ unsigned long actual_length);
+static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long port);
+static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req);
+static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid);
+static owned_port_t *usbif_find_port(char *);
+
+/******************************************************************
+ * PRIVATE DEBUG FUNCTIONS
+ */
+
+#undef DEBUG
+#ifdef DEBUG
+
+static void dump_port(owned_port_t *p)
+{
+ printk(KERN_DEBUG "owned_port_t @ %p\n"
+ " usbif_priv @ %p\n"
+ " path: %s\n"
+ " guest_port: %d\n"
+ " guest_address: %ld\n"
+ " dev_present: %d\n"
+ " dev @ %p\n"
+ " ifaces: 0x%lx\n",
+ p, p->usbif_priv, p->path, p->guest_port, p->guest_address,
+ p->dev_present, p->dev, p->ifaces);
+}
+
+
+static void dump_request(usbif_request_t *req)
+{
+ printk(KERN_DEBUG "id = 0x%lx\n"
+ "devnum %d\n"
+ "endpoint 0x%x\n"
+ "direction %d\n"
+ "speed %d\n"
+ "pipe_type 0x%x\n"
+ "transfer_buffer 0x%lx\n"
+ "length 0x%lx\n"
+ "transfer_flags 0x%lx\n"
+ "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n"
+ "iso_schedule = 0x%lx\n"
+ "num_iso %ld\n",
+ req->id, req->devnum, req->endpoint, req->direction, req->speed,
+ req->pipe_type, req->transfer_buffer, req->length,
+ req->transfer_flags, req->setup[0], req->setup[1], req->setup[2],
+ req->setup[3], req->setup[4], req->setup[5], req->setup[6],
+ req->setup[7], req->iso_schedule, req->num_iso);
+}
+
+static void dump_urb(struct urb *urb)
+{
+ printk(KERN_DEBUG "dumping urb @ %p\n", urb);
+
+#define DUMP_URB_FIELD(name, format) \
+ printk(KERN_DEBUG " " # name " " format "\n", urb-> name)
+
+ DUMP_URB_FIELD(pipe, "0x%x");
+ DUMP_URB_FIELD(status, "%d");
+ DUMP_URB_FIELD(transfer_flags, "0x%x");
+ DUMP_URB_FIELD(transfer_buffer, "%p");
+ DUMP_URB_FIELD(transfer_buffer_length, "%d");
+ DUMP_URB_FIELD(actual_length, "%d");
+}
+
+static void dump_response(usbif_response_t *resp)
+{
+ printk(KERN_DEBUG "usbback: Sending response:\n"
+ " id = 0x%lx\n"
+ " op = %d\n"
+ " status = %d\n"
+ " data = %d\n"
+ " length = %lu\n",
+ resp->id, resp->operation, resp->status, resp->data, resp->length);
+}
+
+#else /* DEBUG */
+
+#define dump_port(blah) ((void)0)
+#define dump_request(blah) ((void)0)
+#define dump_urb(blah) ((void)0)
+#define dump_response(blah) ((void)0)
+
+#endif /* DEBUG */
+
+/******************************************************************
+ * MEMORY MANAGEMENT
+ */
+
+static void fast_flush_area(int idx, int nr_pages)
+{
+ multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+ int i;
+
+ for ( i = 0; i < nr_pages; i++ )
+ {
+ mcl[i].op = __HYPERVISOR_update_va_mapping;
+ mcl[i].args[0] = MMAP_VADDR(idx, i);
+ mcl[i].args[1] = 0;
+ mcl[i].args[2] = 0;
+ }
+
+ mcl[nr_pages-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
+ if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
+ BUG();
+}
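+/*
+ * Each entry above is an update_va_mapping with a zero PTE (args[1] == 0),
+ * unmapping one page of the request's window; only the last entry asks for
+ * the TLB flush, so the whole teardown costs a single batched hypercall.
+ * For one page this is roughly equivalent to (sketch, assuming the usual
+ * three-argument wrapper):
+ *
+ * HYPERVISOR_update_va_mapping(MMAP_VADDR(idx, 0), 0,
+ * UVMF_TLB_FLUSH|UVMF_ALL);
+ */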
+
+
+/******************************************************************
+ * USB INTERFACE SCHEDULER LIST MAINTENANCE
+ */
+
+static struct list_head usbio_schedule_list;
+static spinlock_t usbio_schedule_list_lock;
+
+static int __on_usbif_list(usbif_priv_t *up)
+{
+ return up->usbif_list.next != NULL;
+}
+
+void remove_from_usbif_list(usbif_priv_t *up)
+{
+ unsigned long flags;
+ if ( !__on_usbif_list(up) ) return;
+ spin_lock_irqsave(&usbio_schedule_list_lock, flags);
+ if ( __on_usbif_list(up) )
+ {
+ list_del(&up->usbif_list);
+ up->usbif_list.next = NULL;
+ usbif_put(up);
+ }
+ spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
+}
+
+static void add_to_usbif_list_tail(usbif_priv_t *up)
+{
+ unsigned long flags;
+ if ( __on_usbif_list(up) ) return;
+ spin_lock_irqsave(&usbio_schedule_list_lock, flags);
+ if ( !__on_usbif_list(up) && (up->status == CONNECTED) )
+ {
+ list_add_tail(&up->usbif_list, &usbio_schedule_list);
+ usbif_get(up);
+ }
+ spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
+}
+
+void free_pending(int pending_idx)
+{
+ unsigned long flags;
+
+ /* Free the pending request. */
+ spin_lock_irqsave(&pend_prod_lock, flags);
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+ spin_unlock_irqrestore(&pend_prod_lock, flags);
+}
+
+/******************************************************************
+ * COMPLETION CALLBACK -- Called as urb->complete()
+ */
+
+static void maybe_trigger_usbio_schedule(void);
+
+static void __end_usb_io_op(struct urb *purb)
+{
+ pending_req_t *pending_req;
+ int pending_idx;
+
+ pending_req = purb->context;
+
+ pending_idx = pending_req - pending_reqs;
+
+ ASSERT(purb->actual_length <= purb->transfer_buffer_length);
+ ASSERT(purb->actual_length <= pending_req->nr_pages * PAGE_SIZE);
+
+ /* An error fails the entire request. */
+ if ( purb->status )
+ {
+ printk(KERN_WARNING "URB @ %p failed. Status %d\n", purb, purb->status);
+ }
+
+ if ( usb_pipetype(purb->pipe) == 0 )
+ {
+ int i;
+ usbif_iso_t *sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, pending_req->nr_pages - 1);
+
+ /* If we're dealing with an iso pipe, we need to copy back the schedule. */
+ for ( i = 0; i < purb->number_of_packets; i++ )
+ {
+ sched[i].length = purb->iso_frame_desc[i].actual_length;
+ ASSERT(sched[i].buffer_offset ==
+ purb->iso_frame_desc[i].offset);
+ sched[i].status = purb->iso_frame_desc[i].status;
+ }
+ }
+
+ fast_flush_area(pending_req - pending_reqs, pending_req->nr_pages);
+
+ kfree(purb->setup_packet);
+
+ make_response(pending_req->usbif_priv, pending_req->id,
+ pending_req->operation, pending_req->status, 0, purb->actual_length);
+ usbif_put(pending_req->usbif_priv);
+
+ usb_free_urb(purb);
+
+ free_pending(pending_idx);
+
+ rmb();
+
+ /* Check for anything still waiting in the rings, having freed a request... */
+ maybe_trigger_usbio_schedule();
+}
+
+/******************************************************************
+ * SCHEDULER FUNCTIONS
+ */
+
+static DECLARE_WAIT_QUEUE_HEAD(usbio_schedule_wait);
+
+static int usbio_schedule(void *arg)
+{
+ DECLARE_WAITQUEUE(wq, current);
+
+ usbif_priv_t *up;
+ struct list_head *ent;
+
+ daemonize();
+
+ for ( ; ; )
+ {
+ /* Wait for work to do. */
+ add_wait_queue(&usbio_schedule_wait, &wq);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if ( (NR_PENDING_REQS == MAX_PENDING_REQS) ||
+ list_empty(&usbio_schedule_list) )
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&usbio_schedule_wait, &wq);
+
+ /* Queue up a batch of requests. */
+ while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
+ !list_empty(&usbio_schedule_list) )
+ {
+ ent = usbio_schedule_list.next;
+ up = list_entry(ent, usbif_priv_t, usbif_list);
+ usbif_get(up);
+ remove_from_usbif_list(up);
+ if ( do_usb_io_op(up, BATCH_PER_DOMAIN) )
+ add_to_usbif_list_tail(up);
+ usbif_put(up);
+ }
+ }
+}
+
+static void maybe_trigger_usbio_schedule(void)
+{
+ /*
+ * Needed so that two processes that together make the following predicate
+ * true don't both read stale values and evaluate the predicate
+ * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
+ */
+ smp_mb();
+
+ if ( !list_empty(&usbio_schedule_list) )
+ wake_up(&usbio_schedule_wait);
+}
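+/*
+ * Illustrative interleaving the smp_mb() above defends against: CPU A
+ * queues an interface (making the list non-empty) while CPU B frees a
+ * pending slot. Without the barrier each could test the predicate with a
+ * stale view of the other's write, both conclude there is nothing to do,
+ * and the wakeup would be lost until the next interrupt.
+ */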
+
+
+/******************************************************************************
+ * NOTIFICATION FROM GUEST OS.
+ */
+
+irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+ usbif_priv_t *up = dev_id;
+
+ smp_mb();
+
+ add_to_usbif_list_tail(up);
+
+ /* Will in fact /always/ trigger an io schedule in this case. */
+ maybe_trigger_usbio_schedule();
+
+ return IRQ_HANDLED;
+}
+
+
+
+/******************************************************************
+ * DOWNWARD CALLS -- These interface with the usb-device layer proper.
+ */
+
+static int do_usb_io_op(usbif_priv_t *up, int max_to_do)
+{
+ usbif_back_ring_t *usb_ring = &up->usb_ring;
+ usbif_request_t *req;
+ RING_IDX i, rp;
+ int more_to_do = 0;
+
+ rp = usb_ring->sring->req_prod;
+ rmb(); /* Ensure we see queued requests up to 'rp'. */
+
+ /* Take items off the comms ring, taking care not to overflow. */
+ for ( i = usb_ring->req_cons;
+ (i != rp) && !RING_REQUEST_CONS_OVERFLOW(usb_ring, i);
+ i++ )
+ {
+ if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
+ {
+ more_to_do = 1;
+ break;
+ }
+
+ req = RING_GET_REQUEST(usb_ring, i);
+
+ switch ( req->operation )
+ {
+ case USBIF_OP_PROBE:
+ dispatch_usb_probe(up, req->id, req->port);
+ break;
+
+ case USBIF_OP_IO:
+ /* Assemble an appropriate URB. */
+ dispatch_usb_io(up, req);
+ break;
+
+ case USBIF_OP_RESET:
+ dispatch_usb_reset(up, req->port);
+ break;
+
+ default:
+ DPRINTK("error: unknown USB io operation [%d]\n",
+ req->operation);
+ make_response(up, req->id, req->operation, -EINVAL, 0, 0);
+ break;
+ }
+ }
+
+ usb_ring->req_cons = i;
+
+ return more_to_do;
+}
+
+static owned_port_t *find_guest_port(usbif_priv_t *up, int port)
+{
+ unsigned long flags;
+ struct list_head *l;
+
+ spin_lock_irqsave(&owned_ports_lock, flags);
+ list_for_each(l, &owned_ports)
+ {
+ owned_port_t *p = list_entry(l, owned_port_t, list);
+ if(p->usbif_priv == up && p->guest_port == port)
+ {
+ spin_unlock_irqrestore(&owned_ports_lock, flags);
+ return p;
+ }
+ }
+ spin_unlock_irqrestore(&owned_ports_lock, flags);
+
+ return NULL;
+}
+
+static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid)
+{
+ owned_port_t *port = find_guest_port(up, portid);
+ int ret = 0;
+
+ /* Allowing the guest to actually reset the device causes more problems
+ * than it's worth. We just fake it out in software but we will do a real
+ * reset when the interface is destroyed. */
+
+ if ( port == NULL )
+ {
+ /* Guard: find_guest_port() can return NULL for an unknown port. */
+ make_response(up, 0, USBIF_OP_RESET, -EINVAL, 0, 0);
+ return;
+ }
+
+ dump_port(port);
+
+ port->guest_address = 0;
+ /* If there's an attached device then the port is now enabled. */
+ if ( port->dev_present )
+ port->enabled = 1;
+ else
+ port->enabled = 0;
+
+ make_response(up, 0, USBIF_OP_RESET, ret, 0, 0);
+}
+
+static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long portid)
+{
+ owned_port_t *port = find_guest_port(up, portid);
+ int ret;
+
+ if ( port != NULL )
+ ret = port->dev_present;
+ else
+ {
+ ret = -EINVAL;
+ printk(KERN_INFO "dispatch_usb_probe(): invalid port probe request "
+ "(port %ld)\n", portid);
+ }
+
+ /* Probe result is sent back in-band. Probes don't have an associated id
+ * right now... */
+ make_response(up, id, USBIF_OP_PROBE, ret, portid, 0);
+}
+
+/**
+ * check_iso_schedule - safety check the isochronous schedule for an URB
+ * @purb : the URB in question
+ */
+static int check_iso_schedule(struct urb *purb)
+{
+ int i;
+ unsigned long total_length = 0;
+
+ for ( i = 0; i < purb->number_of_packets; i++ )
+ {
+ struct usb_iso_packet_descriptor *desc = &purb->iso_frame_desc[i];
+
+ if ( desc->offset >= purb->transfer_buffer_length
+ || ( desc->offset + desc->length) > purb->transfer_buffer_length )
+ return -EINVAL;
+
+ total_length += desc->length;
+
+ if ( total_length > purb->transfer_buffer_length )
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req);
+
+static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req)
+{
+ unsigned long buffer_mach;
+ int i = 0, offset = 0,
+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+ pending_req_t *pending_req;
+ unsigned long remap_prot;
+ multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+ struct urb *purb = NULL;
+ owned_port_t *port;
+ unsigned char *setup;
+
+ dump_request(req);
+
+ if ( NR_PENDING_REQS == MAX_PENDING_REQS )
+ {
+ printk(KERN_WARNING "usbback: Max requests already queued. "
+ "Giving up!\n");
+
+ return;
+ }
+
+ port = find_port_for_request(up, req);
+
+ if ( port == NULL )
+ {
+ printk(KERN_WARNING "No such device! (%d)\n", req->devnum);
+ dump_request(req);
+
+ make_response(up, req->id, req->operation, -ENODEV, 0, 0);
+ return;
+ }
+ else if ( !port->dev_present )
+ {
+ /* In normal operation, we'll only get here if a device is unplugged
+ * and the frontend hasn't noticed yet. */
+ make_response(up, req->id, req->operation, -ENODEV, 0, 0);
+ return;
+ }
+
+
+ setup = kmalloc(8, GFP_KERNEL);
+
+ if ( setup == NULL )
+ goto no_mem;
+
+ /* Copy request out for safety. */
+ memcpy(setup, req->setup, 8);
+
+ if( setup[0] == 0x0 && setup[1] == 0x5)
+ {
+ /* To virtualise the USB address space, we need to intercept
+ * set_address messages and emulate. From the USB specification:
+ * bmRequestType = 0x0;
+ * bRequest = SET_ADDRESS (i.e. 0x5)
+ * wValue = device address
+ * wIndex = 0
+ * wLength = 0
+ * data = None
+ */
+ /* The guest supplies the new address little-endian in wValue. */
+ port->guest_address = le16_to_cpu(*(u16 *)(setup + 2));
+ /* Make a successful response. That was easy! */
+
+ make_response(up, req->id, req->operation, 0, 0, 0);
+
+ kfree(setup);
+ return;
+ }
+ else if ( setup[0] == 0x0 && setup[1] == 0x9 )
+ {
+ /* The host kernel needs to know what device configuration is in use
+ * because various error checks get confused otherwise. We just do
+ * configuration settings here, under controlled conditions.
+ */
+
+ /* Ignore configuration setting and hope that the host kernel
+ did it right. */
+ /* usb_set_configuration(port->dev, setup[2]); */
+
+ make_response(up, req->id, req->operation, 0, 0, 0);
+
+ kfree(setup);
+ return;
+ }
+ else if ( setup[0] == 0x1 && setup[1] == 0xB )
+ {
+ /* The host kernel needs to know what device interface is in use
+ * because various error checks get confused otherwise. We apply the
+ * interface setting here, under controlled conditions.
+ */
+ usb_set_interface(port->dev, (setup[4] | setup[5] << 8),
+ (setup[2] | setup[3] << 8) );
+
+ make_response(up, req->id, req->operation, 0, 0, 0);
+
+ kfree(setup);
+ return;
+ }
+
+ if ( ( req->transfer_buffer - (req->transfer_buffer & PAGE_MASK)
+ + req->length )
+ > MMAP_PAGES_PER_REQUEST * PAGE_SIZE )
+ {
+ printk(KERN_WARNING "usbback: request of %lu bytes too large\n",
+ req->length);
+ make_response(up, req->id, req->operation, -EINVAL, 0, 0);
+ kfree(setup);
+ return;
+ }
+
+ buffer_mach = req->transfer_buffer;
+
+ if( buffer_mach == 0 )
+ goto no_remap;
+
+ ASSERT((req->length >> PAGE_SHIFT) <= MMAP_PAGES_PER_REQUEST);
+ ASSERT(buffer_mach);
+
+ /* Always map writeable for now. */
+ remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
+
+ for ( i = 0, offset = 0; offset < req->length;
+ i++, offset += PAGE_SIZE )
+ {
+ mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
+ mcl[i].args[0] = MMAP_VADDR(pending_idx, i);
+ mcl[i].args[1] = ((buffer_mach & PAGE_MASK) + offset) | remap_prot;
+ mcl[i].args[2] = 0;
+ mcl[i].args[3] = up->domid;
+
+ phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+ FOREIGN_FRAME((buffer_mach + offset) >> PAGE_SHIFT);
+
+ ASSERT(virt_to_machine(MMAP_VADDR(pending_idx, i))
+ == ((buffer_mach & PAGE_MASK) + (i << PAGE_SHIFT)));
+ }
+
+ if ( req->pipe_type == 0 && req->num_iso > 0 ) /* Maybe schedule ISO... */
+ {
+ /* Map in ISO schedule, if necessary. */
+ mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
+ mcl[i].args[0] = MMAP_VADDR(pending_idx, i);
+ mcl[i].args[1] = (req->iso_schedule & PAGE_MASK) | remap_prot;
+ mcl[i].args[2] = 0;
+ mcl[i].args[3] = up->domid;
+
+ phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+ FOREIGN_FRAME(req->iso_schedule >> PAGE_SHIFT);
+
+ i++;
+ }
+
+ if ( unlikely(HYPERVISOR_multicall(mcl, i) != 0) )
+ BUG();
+
+ {
+ int j;
+ for ( j = 0; j < i; j++ )
+ {
+ if ( unlikely(mcl[j].args[5] != 0) )
+ {
+ printk(KERN_WARNING
+ "invalid buffer %d -- could not remap it\n", j);
+ fast_flush_area(pending_idx, i);
+ goto bad_descriptor;
+ }
+ }
+ }
+
+ no_remap:
+
+ ASSERT(i <= MMAP_PAGES_PER_REQUEST);
+ ASSERT(i * PAGE_SIZE >= req->length);
+
+ /* We have to do this because some things might complete out of order. */
+ pending_req = &pending_reqs[pending_idx];
+ pending_req->usbif_priv= up;
+ pending_req->id = req->id;
+ pending_req->operation = req->operation;
+ pending_req->nr_pages = i;
+
+ pending_cons++;
+
+ usbif_get(up);
+
+ /* Fill out an actual request for the USB layer. */
+ purb = usb_alloc_urb(req->num_iso);
+
+ if ( purb == NULL )
+ {
+ usbif_put(up);
+ free_pending(pending_idx);
+ goto no_mem;
+ }
+
+ purb->dev = port->dev;
+ purb->context = pending_req;
+ purb->transfer_buffer =
+ (void *)(MMAP_VADDR(pending_idx, 0) + (buffer_mach & ~PAGE_MASK));
+ if(buffer_mach == 0)
+ purb->transfer_buffer = NULL;
+ purb->complete = __end_usb_io_op;
+ purb->transfer_buffer_length = req->length;
+ purb->transfer_flags = req->transfer_flags;
+
+ purb->pipe = 0;
+ purb->pipe |= req->direction << 7;
+ purb->pipe |= port->dev->devnum << 8;
+ purb->pipe |= req->speed << 26;
+ purb->pipe |= req->pipe_type << 30;
+ purb->pipe |= req->endpoint << 15;
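+ /* Pipe bit layout assumed here (the classic Linux encoding): bit 7 is
+ * direction, bits 8-14 the device address, bits 15-18 the endpoint,
+ * bit 26 low speed, bits 30-31 the pipe type (0 == isochronous). */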
+
+ purb->number_of_packets = req->num_iso;
+
+ if ( purb->number_of_packets * sizeof(usbif_iso_t) > PAGE_SIZE )
+ goto urb_error;
+
+ /* Make sure there's always some kind of timeout. */
+ purb->timeout = ( req->timeout > 0 ) ? (req->timeout * HZ) / 1000
+ : 1000;
+
+ purb->setup_packet = setup;
+
+ if ( req->pipe_type == 0 ) /* ISO */
+ {
+ int j;
+ usbif_iso_t *iso_sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, i - 1);
+
+ /* If we're dealing with an iso pipe, we need to copy in a schedule. */
+ for ( j = 0; j < purb->number_of_packets; j++ )
+ {
+ purb->iso_frame_desc[j].length = iso_sched[j].length;
+ purb->iso_frame_desc[j].offset = iso_sched[j].buffer_offset;
+ iso_sched[j].status = 0;
+ }
+ }
+
+ if ( check_iso_schedule(purb) != 0 )
+ goto urb_error;
+
+ if ( usb_submit_urb(purb) != 0 )
+ goto urb_error;
+
+ return;
+
+ urb_error:
+ dump_urb(purb);
+ usbif_put(up);
+ free_pending(pending_idx);
+
+ bad_descriptor:
+ kfree ( setup );
+ if ( purb != NULL )
+ usb_free_urb(purb);
+ make_response(up, req->id, req->operation, -EINVAL, 0, 0);
+ return;
+
+ no_mem:
+ if ( setup != NULL )
+ kfree(setup);
+ make_response(up, req->id, req->operation, -ENOMEM, 0, 0);
+ return;
+}
+
+
+
+/******************************************************************
+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
+ */
+
+
+static void make_response(usbif_priv_t *up, unsigned long id,
+ unsigned short op, int st, int inband,
+ unsigned long length)
+{
+ usbif_response_t *resp;
+ unsigned long flags;
+ usbif_back_ring_t *usb_ring = &up->usb_ring;
+
+ /* Place on the response ring for the relevant domain. */
+ spin_lock_irqsave(&up->usb_ring_lock, flags);
+ resp = RING_GET_RESPONSE(usb_ring, usb_ring->rsp_prod_pvt);
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+ resp->data = inband;
+ resp->length = length;
+ wmb(); /* Ensure other side can see the response fields. */
+
+ dump_response(resp);
+
+ usb_ring->rsp_prod_pvt++;
+ RING_PUSH_RESPONSES(usb_ring);
+ spin_unlock_irqrestore(&up->usb_ring_lock, flags);
+
+ /* Kick the relevant domain. */
+ notify_via_evtchn(up->evtchn);
+}
+
+/**
+ * usbif_claim_port - claim devices on a port on behalf of guest
+ *
+ * Once completed, this will ensure that any device attached to that
+ * port is claimed by this driver for use by the guest.
+ */
+int usbif_claim_port(usbif_be_claim_port_t *msg)
+{
+ owned_port_t *o_p;
+
+ /* Sanity... */
+ if ( usbif_find_port(msg->path) != NULL )
+ {
+ printk(KERN_WARNING "usbback: Attempted to claim USB port "
+ "we already own!\n");
+ return -EINVAL;
+ }
+
+ /* No need for a slab cache - this should be infrequent. */
+ o_p = kmalloc(sizeof(owned_port_t), GFP_KERNEL);
+
+ if ( o_p == NULL )
+ return -ENOMEM;
+
+ o_p->enabled = 0;
+ o_p->usbif_priv = usbif_find(msg->domid);
+ o_p->guest_port = msg->usbif_port;
+ o_p->dev_present = 0;
+ o_p->guest_address = 0; /* Default address. */
+
+ strcpy(o_p->path, msg->path);
+
+ spin_lock_irq(&owned_ports_lock);
+
+ list_add(&o_p->list, &owned_ports);
+
+ spin_unlock_irq(&owned_ports_lock);
+
+ printk(KERN_INFO "usbback: Claimed USB port (%s) for %d.%d\n", o_p->path,
+ msg->domid, msg->usbif_port);
+
+ /* Force a reprobe for unclaimed devices. */
+ usb_scan_devices();
+
+ return 0;
+}
+
+owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req)
+{
+ unsigned long flags;
+ struct list_head *port;
+
+ /* I'm assuming this is not called from IRQ context - correct? I think
+ * it's probably only called in response to control messages or plug events
+ * in the USB hub kernel thread, so should be OK. */
+ spin_lock_irqsave(&owned_ports_lock, flags);
+ list_for_each(port, &owned_ports)
+ {
+ owned_port_t *p = list_entry(port, owned_port_t, list);
+ if(p->usbif_priv == up && p->guest_address == req->devnum && p->enabled )
+ {
+ dump_port(p);
+
+ spin_unlock_irqrestore(&owned_ports_lock, flags);
+ return p;
+ }
+ }
+ spin_unlock_irqrestore(&owned_ports_lock, flags);
+
+ return NULL;
+}
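+/*
+ * Note the lookup key above: req->devnum is matched against
+ * port->guest_address, i.e. the address the guest assigned through the
+ * intercepted SET_ADDRESS in dispatch_usb_io(), not the device's real
+ * address on the physical bus.
+ */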
+
+owned_port_t *__usbif_find_port(char *path)
+{
+ struct list_head *port;
+
+ list_for_each(port, &owned_ports)
+ {
+ owned_port_t *p = list_entry(port, owned_port_t, list);
+ if(!strcmp(path, p->path))
+ {
+ return p;
+ }
+ }
+
+ return NULL;
+}
+
+owned_port_t *usbif_find_port(char *path)
+{
+ owned_port_t *ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&owned_ports_lock, flags);
+ ret = __usbif_find_port(path);
+ spin_unlock_irqrestore(&owned_ports_lock, flags);
+
+ return ret;
+}
+
+
+static void *probe(struct usb_device *dev, unsigned iface,
+ const struct usb_device_id *id)
+{
+ owned_port_t *p;
+
+ /* We don't care what the device is - if we own the port, we want it. We
+ * don't deal with device-specifics in this driver, so we don't care what
+ * the device actually is ;-) */
+ if ( ( p = usbif_find_port(dev->devpath) ) != NULL )
+ {
+ printk(KERN_INFO "usbback: claimed device attached to owned port\n");
+
+ p->dev_present = 1;
+ p->dev = dev;
+ set_bit(iface, &p->ifaces);
+
+ return p->usbif_priv;
+ }
+ else
+ printk(KERN_INFO "usbback: hotplug for non-owned port (%s), ignoring\n",
+ dev->devpath);
+
+
+ return NULL;
+}
+
+static void disconnect(struct usb_device *dev, void *usbif)
+{
+ /* Note the device is removed so we can tell the guest when it probes. */
+ owned_port_t *port = usbif_find_port(dev->devpath);
+ port->dev_present = 0;
+ port->dev = NULL;
+ port->ifaces = 0;
+}
+
+
+struct usb_driver driver =
+{
+ .owner = THIS_MODULE,
+ .name = "Xen USB Backend",
+ .probe = probe,
+ .disconnect = disconnect,
+ .id_table = NULL,
+};
+
+/* __usbif_release_port - internal mechanics for releasing a port */
+void __usbif_release_port(owned_port_t *p)
+{
+ int i;
+
+ for ( i = 0; p->ifaces != 0; i++)
+ if ( p->ifaces & 1 << i )
+ {
+ usb_driver_release_interface(&driver, usb_ifnum_to_if(p->dev, i));
+ clear_bit(i, &p->ifaces);
+ }
+ list_del(&p->list);
+
+ /* Reset the real device. We don't simulate disconnect / probe for other
+ * drivers in this kernel because we assume the device is completely under
+ * the control of ourselves (i.e. the guest!). This should ensure that the
+ * device is in a sane state for the next customer ;-) */
+
+ /* MAW NB: we're not resetting the real device here. This looks perfectly
+ * valid to me but it causes memory corruption. We seem to get away with not
+ * resetting for now, although it'd be nice to have this tracked down. */
+/* if ( p->dev != NULL) */
+/* usb_reset_device(p->dev); */
+
+ kfree(p);
+}
+
+
+/**
+ * usbif_release_port - stop claiming devices on a port on behalf of guest
+ */
+void usbif_release_port(usbif_be_release_port_t *msg)
+{
+ owned_port_t *p;
+
+ spin_lock_irq(&owned_ports_lock);
+ p = __usbif_find_port(msg->path);
+ if ( p != NULL ) /* Don't release a port we never claimed. */
+ __usbif_release_port(p);
+ spin_unlock_irq(&owned_ports_lock);
+}
+
+void usbif_release_ports(usbif_priv_t *up)
+{
+ struct list_head *port, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&owned_ports_lock, flags);
+ list_for_each_safe(port, tmp, &owned_ports)
+ {
+ owned_port_t *p = list_entry(port, owned_port_t, list);
+ if ( p->usbif_priv == up )
+ __usbif_release_port(p);
+ }
+ spin_unlock_irqrestore(&owned_ports_lock, flags);
+}
+
+static int __init usbif_init(void)
+{
+ int i;
+
+ if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
+ !(xen_start_info.flags & SIF_USB_BE_DOMAIN) )
+ return 0;
+
+ if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
+ BUG();
+
+ pending_cons = 0;
+ pending_prod = MAX_PENDING_REQS;
+ memset(pending_reqs, 0, sizeof(pending_reqs));
+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
+ pending_ring[i] = i;
+
+ spin_lock_init(&pend_prod_lock);
+
+ spin_lock_init(&owned_ports_lock);
+ INIT_LIST_HEAD(&owned_ports);
+
+ spin_lock_init(&usbio_schedule_list_lock);
+ INIT_LIST_HEAD(&usbio_schedule_list);
+
+ if ( kernel_thread(usbio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
+ BUG();
+
+ usbif_interface_init();
+
+ usbif_ctrlif_init();
+
+ usb_register(&driver);
+
+ printk(KERN_INFO "Xen USB Backend Initialised\n");
+
+ return 0;
+}
+
+__initcall(usbif_init);
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/usbfront/usbfront.c b/linux-2.6.11-xen-sparse/drivers/xen/usbfront/usbfront.c
new file mode 100644
index 0000000000..6a517b13bc
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/usbfront/usbfront.c
@@ -0,0 +1,1738 @@
+/*
+ * Xen Virtual USB Frontend Driver
+ *
+ * This file contains the first version of the Xen virtual USB hub
+ * that I've managed not to delete by mistake (3rd time lucky!).
+ *
+ * Based on Linux's uhci.c, original copyright notices are displayed
+ * below. Portions also (c) 2004 Intel Research Cambridge
+ * and (c) 2004, 2005 Mark Williamson
+ *
+ * Contact <mark.williamson@cl.cam.ac.uk> or
+ * <xen-devel@lists.sourceforge.net> regarding this code.
+ *
+ * Still to be (maybe) implemented:
+ * - migration / backend restart support?
+ * - support for building / using as a module
+ */
+
+/*
+ * Universal Host Controller Interface driver for USB.
+ *
+ * Maintainer: Johannes Erdfelt <johannes@erdfelt.com>
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
+ * (C) Copyright 1999 Randy Dunlap
+ * (C) Copyright 1999 Georg Acher, acher@in.tum.de
+ * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
+ * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
+ * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
+ * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
+ * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
+ * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
+ *
+ * Intel documents this fairly well, and as far as I know there
+ * are no royalties or anything like that, but even so there are
+ * people who decided that they want to do the same thing in a
+ * completely different way.
+ *
+ * WARNING! The USB documentation is downright evil. Most of it
+ * is just crap, written by a committee. You're better off ignoring
+ * most of it, the important stuff is:
+ * - the low-level protocol (fairly simple but lots of small details)
+ * - working around the horridness of the rest
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#ifdef CONFIG_USB_DEBUG
+#define DEBUG
+#else
+#undef DEBUG
+#endif
+#include <linux/usb.h>
+
+#include <asm/irq.h>
+#include <asm/system.h>
+
+#include "xhci.h"
+
+#include "../../../../../drivers/usb/hcd.h"
+
+#include <asm-xen/xen-public/io/usbif.h>
+#include <asm/ctrl_if.h>
+#include <asm/xen-public/io/domain_controller.h>
+
+/*
+ * Version Information
+ */
+#define DRIVER_VERSION "v1.0"
+#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, " \
+ "Randy Dunlap, Georg Acher, Deti Fliegl, " \
+ "Thomas Sailer, Roman Weissgaerber, Mark Williamson"
+#define DRIVER_DESC "Xen Virtual USB Host Controller Interface"
+
+/*
+ * debug = 0, no debugging messages
+ * debug = 1, dump failed URB's except for stalls
+ * debug = 2, dump all failed URB's (including stalls)
+ */
+#ifdef DEBUG
+static int debug = 1;
+#else
+static int debug = 0;
+#endif
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "Debug level");
+static char *errbuf;
+#define ERRBUF_LEN (PAGE_SIZE * 8)
+
+static int rh_submit_urb(struct urb *urb);
+static int rh_unlink_urb(struct urb *urb);
+static int xhci_unlink_urb(struct urb *urb);
+static void xhci_call_completion(struct urb *urb);
+static void xhci_drain_ring(void);
+static void xhci_transfer_result(struct xhci *xhci, struct urb *urb);
+static void xhci_finish_completion(void);
+
+#define MAX_URB_LOOP 2048 /* Maximum number of linked URB's */
+
+static kmem_cache_t *xhci_up_cachep; /* urb_priv cache */
+static struct xhci *xhci; /* XHCI structure for the interface */
+
+/******************************************************************************
+ * DEBUGGING
+ */
+
+#ifdef DEBUG
+
+static void dump_urb(struct urb *urb)
+{
+ printk(KERN_DEBUG "dumping urb @ %p\n"
+ " hcpriv = %p\n"
+ " next = %p\n"
+ " dev = %p\n"
+ " pipe = 0x%lx\n"
+ " status = %d\n"
+ " transfer_flags = 0x%lx\n"
+ " transfer_buffer = %p\n"
+ " transfer_buffer_length = %d\n"
+ " actual_length = %d\n"
+ " bandwidth = %d\n"
+ " setup_packet = %p\n",
+ urb, urb->hcpriv, urb->next, urb->dev, urb->pipe, urb->status,
+ urb->transfer_flags, urb->transfer_buffer,
+ urb->transfer_buffer_length, urb->actual_length, urb->bandwidth,
+ urb->setup_packet);
+ if ( urb->setup_packet != NULL )
+ printk(KERN_DEBUG
+ "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n",
+ urb->setup_packet[0], urb->setup_packet[1],
+ urb->setup_packet[2], urb->setup_packet[3],
+ urb->setup_packet[4], urb->setup_packet[5],
+ urb->setup_packet[6], urb->setup_packet[7]);
+ printk(KERN_DEBUG "complete = %p\n"
+ "interval = %d\n", urb->complete, urb->interval);
+
+}
+
+static void xhci_show_resp(usbif_response_t *r)
+{
+ printk(KERN_DEBUG "dumping response @ %p\n"
+ " id=0x%lx\n"
+ " op=0x%x\n"
+ " data=0x%x\n"
+ " status=0x%x\n"
+ " length=0x%lx\n",
+ r->id, r->operation, r->data, r->status, r->length);
+}
+
+#define DPRINTK(...) printk(KERN_DEBUG __VA_ARGS__)
+
+#else /* DEBUG */
+
+#define dump_urb(blah) ((void)0)
+#define xhci_show_resp(blah) ((void)0)
+#define DPRINTK(...) ((void)0)
+
+#endif /* DEBUG */
+
+/******************************************************************************
+ * RING REQUEST HANDLING
+ */
+
+#define RING_PLUGGED(_hc) ( RING_FULL(&_hc->usb_ring) || _hc->recovery )
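+/*
+ * RING_FULL() is the generic Xen ring-full test; 'recovery' is assumed to
+ * be a flag in struct xhci (declared in xhci.h, not shown here) that is
+ * set while the connection to the backend is re-established, during which
+ * no new requests may be queued.
+ */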
+
+/**
+ * xhci_construct_isoc - add isochronous information to a request
+ */
+static int xhci_construct_isoc(usbif_request_t *req, struct urb *urb)
+{
+ usbif_iso_t *schedule;
+ int i;
+ struct urb_priv *urb_priv = urb->hcpriv;
+
+ req->num_iso = urb->number_of_packets;
+ schedule = (usbif_iso_t *)__get_free_page(GFP_KERNEL);
+
+ if ( schedule == NULL )
+ return -ENOMEM;
+
+ for ( i = 0; i < req->num_iso; i++ )
+ {
+ schedule[i].buffer_offset = urb->iso_frame_desc[i].offset;
+ schedule[i].length = urb->iso_frame_desc[i].length;
+ }
+
+ urb_priv->schedule = schedule;
+ req->iso_schedule = virt_to_machine(schedule);
+
+ return 0;
+}
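+/*
+ * The schedule page allocated above is shared with the backend: its
+ * machine address travels in req->iso_schedule, the backend maps it to
+ * read per-packet offsets and lengths and to write back status, and the
+ * frontend frees it in receive_usb_io() once the response has been
+ * copied out.
+ */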
+
+/**
+ * xhci_queue_req - construct and queue request for an URB
+ */
+static int xhci_queue_req(struct urb *urb)
+{
+ unsigned long flags;
+ usbif_request_t *req;
+ usbif_front_ring_t *usb_ring = &xhci->usb_ring;
+
+#ifdef DEBUG
+ printk(KERN_DEBUG
+ "usb_ring = %p, req_prod = %d (@ 0x%lx), rsp_prod = %d, rsp_cons = %d\n",
+ usb_ring, usb_ring->sring->req_prod,
+ virt_to_machine(&usb_ring->sring->req_prod),
+ usb_ring->sring->rsp_prod, usb_ring->rsp_cons);
+#endif
+
+ spin_lock_irqsave(&xhci->ring_lock, flags);
+
+ if ( RING_PLUGGED(xhci) )
+ {
+ printk(KERN_WARNING
+ "xhci_queue_req(): USB ring plugged, not queuing request\n");
+ spin_unlock_irqrestore(&xhci->ring_lock, flags);
+ return -ENOBUFS;
+ }
+
+ /* Stick something in the shared communications ring. */
+ req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
+
+ req->operation = USBIF_OP_IO;
+ req->port = 0; /* We don't care what the port is. */
+ req->id = (unsigned long) urb->hcpriv;
+ req->transfer_buffer = virt_to_machine(urb->transfer_buffer);
+ req->devnum = usb_pipedevice(urb->pipe);
+ req->direction = usb_pipein(urb->pipe);
+ req->speed = usb_pipeslow(urb->pipe);
+ req->pipe_type = usb_pipetype(urb->pipe);
+ req->length = urb->transfer_buffer_length;
+ req->transfer_flags = urb->transfer_flags;
+ req->endpoint = usb_pipeendpoint(urb->pipe);
+ req->timeout = (urb->timeout * 1000) / HZ; /* jiffies -> ms */
+
+ if ( usb_pipetype(urb->pipe) == 0 ) /* ISO */
+ {
+ int ret = xhci_construct_isoc(req, urb);
+ if ( ret != 0 )
+ {
+ /* Don't leak the ring lock on the error path. */
+ spin_unlock_irqrestore(&xhci->ring_lock, flags);
+ return ret;
+ }
+ }
+
+ if(urb->setup_packet != NULL)
+ memcpy(req->setup, urb->setup_packet, 8);
+ else
+ memset(req->setup, 0, 8);
+
+ usb_ring->req_prod_pvt++;
+ RING_PUSH_REQUESTS(usb_ring);
+
+ spin_unlock_irqrestore(&xhci->ring_lock, flags);
+
+ notify_via_evtchn(xhci->evtchn);
+
+ DPRINTK("Queued request for an URB.\n");
+ dump_urb(urb);
+
+ return -EINPROGRESS;
+}
+
+/**
+ * xhci_queue_probe - queue a probe request for a particular port
+ */
+static inline usbif_request_t *xhci_queue_probe(usbif_vdev_t port)
+{
+ usbif_request_t *req;
+ usbif_front_ring_t *usb_ring = &xhci->usb_ring;
+
+#ifdef DEBUG
+ printk(KERN_DEBUG
+ "queuing probe: req_prod = %d (@ 0x%lx), rsp_prod = %d, "
+ "rsp_cons = %d\n", usb_ring->sring->req_prod,
+ virt_to_machine(&usb_ring->sring->req_prod),
+ usb_ring->sring->rsp_prod, usb_ring->rsp_cons);
+#endif
+
+ /* This is always called from the timer interrupt. */
+ spin_lock(&xhci->ring_lock);
+
+ if ( RING_PLUGGED(xhci) )
+ {
+ printk(KERN_WARNING
+ "xhci_queue_probe(): ring full, not queuing request\n");
+ spin_unlock(&xhci->ring_lock);
+ return NULL;
+ }
+
+ /* Stick something in the shared communications ring. */
+ req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
+
+ memset(req, 0, sizeof(*req));
+
+ req->operation = USBIF_OP_PROBE;
+ req->port = port;
+
+ usb_ring->req_prod_pvt++;
+ RING_PUSH_REQUESTS(usb_ring);
+
+ spin_unlock(&xhci->ring_lock);
+
+ notify_via_evtchn(xhci->evtchn);
+
+ return req;
+}
+
+/**
+ * xhci_port_reset - queue a reset request for a particular port
+ */
+static int xhci_port_reset(usbif_vdev_t port)
+{
+ usbif_request_t *req;
+ usbif_front_ring_t *usb_ring = &xhci->usb_ring;
+
+ /* Only ever happens from process context (hub thread). */
+ spin_lock_irq(&xhci->ring_lock);
+
+ if ( RING_PLUGGED(xhci) )
+ {
+ printk(KERN_WARNING
+ "xhci_port_reset(): ring plugged, not queuing request\n");
+ spin_unlock_irq(&xhci->ring_lock);
+ return -ENOBUFS;
+ }
+
+ /* We only reset one port at a time, so we only need one variable per
+ * hub. */
+ xhci->awaiting_reset = 1;
+
+ /* Stick something in the shared communications ring. */
+ req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
+
+ memset(req, 0, sizeof(*req));
+
+ req->operation = USBIF_OP_RESET;
+ req->port = port;
+
+ usb_ring->req_prod_pvt++;
+ RING_PUSH_REQUESTS(usb_ring);
+
+ spin_unlock_irq(&xhci->ring_lock);
+
+ notify_via_evtchn(xhci->evtchn);
+
+ while ( xhci->awaiting_reset > 0 )
+ {
+ mdelay(1);
+ xhci_drain_ring();
+ }
+
+ xhci->rh.ports[port].pe = 1;
+ xhci->rh.ports[port].pe_chg = 1;
+
+ return xhci->awaiting_reset;
+}
+
+
+/******************************************************************************
+ * RING RESPONSE HANDLING
+ */
+
+static void receive_usb_reset(usbif_response_t *resp)
+{
+ xhci->awaiting_reset = resp->status;
+ rmb();
+
+}
+
+static void receive_usb_probe(usbif_response_t *resp)
+{
+ spin_lock(&xhci->rh.port_state_lock);
+
+ if ( resp->status >= 0 )
+ {
+ if ( resp->status == 1 )
+ {
+ /* If there's a device there and there wasn't one before, there must
+ * have been a connection status change. */
+ if( xhci->rh.ports[resp->data].cs == 0 )
+ {
+ xhci->rh.ports[resp->data].cs = 1;
+ xhci->rh.ports[resp->data].cs_chg = 1;
+ }
+ }
+ else if ( resp->status == 0 )
+ {
+ if(xhci->rh.ports[resp->data].cs == 1 )
+ {
+ xhci->rh.ports[resp->data].cs = 0;
+ xhci->rh.ports[resp->data].cs_chg = 1;
+ xhci->rh.ports[resp->data].pe = 0;
+ /* According to USB Spec v2.0, 11.24.2.7.2.2, we don't need
+ * to set pe_chg since an error has not occurred. */
+ }
+ }
+ else
+ printk(KERN_WARNING "receive_usb_probe(): unexpected status %d "
+ "for port %d\n", resp->status, resp->data);
+ }
+ else if ( resp->status < 0)
+ printk(KERN_WARNING "receive_usb_probe(): got error status %d\n",
+ resp->status);
+
+ spin_unlock(&xhci->rh.port_state_lock);
+}
+
+static void receive_usb_io(usbif_response_t *resp)
+{
+ struct urb_priv *urbp = (struct urb_priv *)resp->id;
+ struct urb *urb = urbp->urb;
+
+ urb->actual_length = resp->length;
+ urbp->in_progress = 0;
+
+ if( usb_pipetype(urb->pipe) == 0 ) /* ISO */
+ {
+ int i;
+
+ /* Copy ISO schedule results back in. */
+ for ( i = 0; i < urb->number_of_packets; i++ )
+ {
+ urb->iso_frame_desc[i].status
+ = urbp->schedule[i].status;
+ urb->iso_frame_desc[i].actual_length
+ = urbp->schedule[i].length;
+ }
+ free_page((unsigned long)urbp->schedule);
+ }
+
+ /* Only set status if it's not been changed since submission. It might
+ * have been changed if the URB has been unlinked asynchronously, for
+ * instance. */
+ if ( urb->status == -EINPROGRESS )
+ urbp->status = urb->status = resp->status;
+}
+
+/**
+ * xhci_drain_ring - drain responses from the ring, calling handlers
+ *
+ * This may be called from interrupt context when an event is received from the
+ * backend domain, or sometimes in process context whilst waiting for a port
+ * reset or URB completion.
+ */
+static void xhci_drain_ring(void)
+{
+ struct list_head *tmp, *head;
+ usbif_front_ring_t *usb_ring = &xhci->usb_ring;
+ usbif_response_t *resp;
+ RING_IDX i, rp;
+
+ /* Walk the ring here to get responses, updating URBs to show what
+ * completed. */
+
+ rp = usb_ring->sring->rsp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ /* Take items off the comms ring, taking care not to overflow. */
+ for ( i = usb_ring->rsp_cons; i != rp; i++ )
+ {
+ resp = RING_GET_RESPONSE(usb_ring, i);
+
+ /* May need to deal with batching and with putting a ceiling on
+ the number dispatched for performance and anti-dos reasons */
+
+ xhci_show_resp(resp);
+
+ switch ( resp->operation )
+ {
+ case USBIF_OP_PROBE:
+ receive_usb_probe(resp);
+ break;
+
+ case USBIF_OP_IO:
+ receive_usb_io(resp);
+ break;
+
+ case USBIF_OP_RESET:
+ receive_usb_reset(resp);
+ break;
+
+ default:
+ printk(KERN_WARNING
+ "error: unknown USB io operation response [%d]\n",
+ resp->operation);
+ break;
+ }
+ }
+
+ usb_ring->rsp_cons = i;
+
+ /* Walk the list of pending URB's to see which ones completed and do
+ * callbacks, etc. */
+ spin_lock(&xhci->urb_list_lock);
+ head = &xhci->urb_list;
+ tmp = head->next;
+ while (tmp != head) {
+ struct urb *urb = list_entry(tmp, struct urb, urb_list);
+
+ tmp = tmp->next;
+
+ /* Checks the status and does all of the magic necessary */
+ xhci_transfer_result(xhci, urb);
+ }
+ spin_unlock(&xhci->urb_list_lock);
+
+ xhci_finish_completion();
+}
+
+
+static void xhci_interrupt(int irq, void *__xhci, struct pt_regs *regs)
+{
+ xhci_drain_ring();
+}
+
+/******************************************************************************
+ * HOST CONTROLLER FUNCTIONALITY
+ */
+
+/**
+ * no-op implementation of private device alloc / free routines
+ */
+static int xhci_do_nothing_dev(struct usb_device *dev)
+{
+ return 0;
+}
+
+static inline void xhci_add_complete(struct urb *urb)
+{
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xhci->complete_list_lock, flags);
+ list_add_tail(&urbp->complete_list, &xhci->complete_list);
+ spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
+}
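+
+/* URBs queued here are drained by xhci_finish_completion(), which
+ * xhci_drain_ring() calls once all outstanding responses have been
+ * consumed. */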
+
+/* When this returns, the owner of the URB may free its
+ * storage.
+ *
+ * We spin and wait for the URB to complete before returning.
+ *
+ * Call with urb->lock acquired.
+ */
+static void xhci_delete_urb(struct urb *urb)
+{
+ struct urb_priv *urbp;
+
+ urbp = urb->hcpriv;
+
+ /* If there's no urb_priv structure for this URB then it can't have
+ * been submitted at all. */
+ if ( urbp == NULL )
+ return;
+
+ /* For now we just spin until the URB completes. It shouldn't take too
+ * long and we don't expect to have to do this very often. */
+ while ( urb->status == -EINPROGRESS )
+ {
+ xhci_drain_ring();
+ mdelay(1);
+ }
+
+ /* Now we know that further transfers to the buffer won't
+ * occur, so we can safely return. */
+}
+
+static struct urb_priv *xhci_alloc_urb_priv(struct urb *urb)
+{
+ struct urb_priv *urbp;
+
+ urbp = kmem_cache_alloc(xhci_up_cachep, SLAB_ATOMIC);
+ if (!urbp) {
+ err("xhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n");
+ return NULL;
+ }
+
+ memset((void *)urbp, 0, sizeof(*urbp));
+
+ urbp->inserttime = jiffies;
+ urbp->urb = urb;
+ urbp->dev = urb->dev;
+
+ INIT_LIST_HEAD(&urbp->complete_list);
+
+ urb->hcpriv = urbp;
+
+ return urbp;
+}
+
+/*
+ * MUST be called with urb->lock acquired
+ */
+/* When is this called? Do we need to stop the transfer (as we
+ * currently do)? */
+static void xhci_destroy_urb_priv(struct urb *urb)
+{
+ struct urb_priv *urbp;
+
+ urbp = (struct urb_priv *)urb->hcpriv;
+ if (!urbp)
+ return;
+
+ if (!list_empty(&urb->urb_list))
+ warn("xhci_destroy_urb_priv: urb %p still on xhci->urb_list", urb);
+
+ if (!list_empty(&urbp->complete_list))
+ warn("xhci_destroy_urb_priv: urb %p still on xhci->complete_list", urb);
+
+ kmem_cache_free(xhci_up_cachep, urb->hcpriv);
+
+ urb->hcpriv = NULL;
+}
+
+/**
+ * Try to find URBs in progress on the same pipe to the same device.
+ *
+ * MUST be called with xhci->urb_list_lock acquired
+ */
+static struct urb *xhci_find_urb_ep(struct xhci *xhci, struct urb *urb)
+{
+ struct list_head *tmp, *head;
+
+ /* We don't match Isoc transfers since they are special */
+ if (usb_pipeisoc(urb->pipe))
+ return NULL;
+
+ head = &xhci->urb_list;
+ tmp = head->next;
+ while (tmp != head) {
+ struct urb *u = list_entry(tmp, struct urb, urb_list);
+
+ tmp = tmp->next;
+
+ if (u->dev == urb->dev && u->pipe == urb->pipe &&
+ u->status == -EINPROGRESS)
+ return u;
+ }
+
+ return NULL;
+}
+
+static int xhci_submit_urb(struct urb *urb)
+{
+ int ret = -EINVAL;
+ unsigned long flags;
+ struct urb *eurb;
+ int bustime;
+
+ DPRINTK("URB submitted to XHCI driver.\n");
+ dump_urb(urb);
+
+ if (!urb)
+ return -EINVAL;
+
+ if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv) {
+ warn("xhci_submit_urb: urb %p belongs to disconnected device or bus?", urb);
+ return -ENODEV;
+ }
+
+ BUG_ON(urb->dev->devpath == NULL);
+
+ usb_inc_dev_use(urb->dev);
+
+ spin_lock_irqsave(&xhci->urb_list_lock, flags);
+ spin_lock(&urb->lock);
+
+ if (urb->status == -EINPROGRESS || urb->status == -ECONNRESET ||
+ urb->status == -ECONNABORTED) {
+ dbg("xhci_submit_urb: urb not available to submit (status = %d)", urb->status);
+ /* Since we can have problems on the out path */
+ spin_unlock(&urb->lock);
+ spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
+ usb_dec_dev_use(urb->dev);
+
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&urb->urb_list);
+ if (!xhci_alloc_urb_priv(urb)) {
+ ret = -ENOMEM;
+
+ goto out;
+ }
+
+ ( (struct urb_priv *)urb->hcpriv )->in_progress = 1;
+
+ eurb = xhci_find_urb_ep(xhci, urb);
+ if (eurb && !(urb->transfer_flags & USB_QUEUE_BULK)) {
+ ret = -ENXIO;
+
+ goto out;
+ }
+
+ /* Short circuit the virtual root hub */
+ if (urb->dev == xhci->rh.dev) {
+ ret = rh_submit_urb(urb);
+
+ goto out;
+ }
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ ret = xhci_queue_req(urb);
+ break;
+
+ case PIPE_INTERRUPT:
+ if (urb->bandwidth == 0) { /* not yet checked/allocated */
+ bustime = usb_check_bandwidth(urb->dev, urb);
+ if (bustime < 0)
+ ret = bustime;
+ else {
+ ret = xhci_queue_req(urb);
+ if (ret == -EINPROGRESS)
+ usb_claim_bandwidth(urb->dev, urb,
+ bustime, 0);
+ }
+ } else /* bandwidth is already set */
+ ret = xhci_queue_req(urb);
+ break;
+
+ case PIPE_ISOCHRONOUS:
+ if (urb->bandwidth == 0) { /* not yet checked/allocated */
+ if (urb->number_of_packets <= 0) {
+ ret = -EINVAL;
+ break;
+ }
+ bustime = usb_check_bandwidth(urb->dev, urb);
+ if (bustime < 0) {
+ ret = bustime;
+ break;
+ }
+
+ ret = xhci_queue_req(urb);
+ if (ret == -EINPROGRESS)
+ usb_claim_bandwidth(urb->dev, urb, bustime, 1);
+ } else /* bandwidth is already set */
+ ret = xhci_queue_req(urb);
+ break;
+ }
+out:
+ urb->status = ret;
+
+ if (ret == -EINPROGRESS) {
+ /* We use _tail to make find_urb_ep more efficient */
+ list_add_tail(&urb->urb_list, &xhci->urb_list);
+
+ spin_unlock(&urb->lock);
+ spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
+
+ return 0;
+ }
+
+ xhci_delete_urb(urb);
+
+ spin_unlock(&urb->lock);
+ spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
+
+ /* Only call completion if it was successful */
+ if (!ret)
+ xhci_call_completion(urb);
+
+ return ret;
+}
+
+/*
+ * Return the result of a transfer
+ *
+ * MUST be called with urb_list_lock acquired
+ */
+static void xhci_transfer_result(struct xhci *xhci, struct urb *urb)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct urb_priv *urbp;
+
+ /* The root hub is special */
+ if (urb->dev == xhci->rh.dev)
+ return;
+
+ spin_lock_irqsave(&urb->lock, flags);
+
+ urbp = (struct urb_priv *)urb->hcpriv;
+
+ if ( urbp->in_progress )
+ ret = -EINPROGRESS;
+
+ if (urb->actual_length < urb->transfer_buffer_length) {
+ if (urb->transfer_flags & USB_DISABLE_SPD) {
+ ret = -EREMOTEIO;
+ }
+ }
+
+ if (urb->status == -EPIPE)
+ {
+ ret = urb->status;
+ /* endpoint has stalled - mark it halted */
+ usb_endpoint_halt(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe));
+ }
+
+ if ((debug == 1 && ret != 0 && ret != -EPIPE) ||
+ (ret != 0 && debug > 1)) {
+ /* Some debugging code */
+ dbg("xhci_result_interrupt/bulk() failed with status %x",
+ status);
+ }
+
+ if (ret == -EINPROGRESS)
+ goto out;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ case PIPE_ISOCHRONOUS:
+ /* Release bandwidth for Interrupt or Isoc. transfers */
+ /* Spinlock needed ? */
+ if (urb->bandwidth)
+ usb_release_bandwidth(urb->dev, urb, 1);
+ xhci_delete_urb(urb);
+ break;
+ case PIPE_INTERRUPT:
+ /* Interrupts are an exception */
+ if (urb->interval)
+ goto out_complete;
+
+ /* Release bandwidth for Interrupt or Isoc. transfers */
+ /* Spinlock needed ? */
+ if (urb->bandwidth)
+ usb_release_bandwidth(urb->dev, urb, 0);
+ xhci_delete_urb(urb);
+ break;
+ default:
+ info("xhci_transfer_result: unknown pipe type %d for urb %p\n",
+ usb_pipetype(urb->pipe), urb);
+ }
+
+ /* Remove it from xhci->urb_list */
+ list_del_init(&urb->urb_list);
+
+out_complete:
+ xhci_add_complete(urb);
+
+out:
+ spin_unlock_irqrestore(&urb->lock, flags);
+}
+
+static int xhci_unlink_urb(struct urb *urb)
+{
+ unsigned long flags;
+ struct urb_priv *urbp;
+
+ if (!urb)
+ return -EINVAL;
+
+ urbp = urb->hcpriv;
+
+ if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv)
+ return -ENODEV;
+
+ spin_lock_irqsave(&xhci->urb_list_lock, flags);
+ spin_lock(&urb->lock);
+
+ /* Release bandwidth for Interrupt or Isoc. transfers */
+ /* Spinlock needed ? */
+ if (urb->bandwidth) {
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_INTERRUPT:
+ usb_release_bandwidth(urb->dev, urb, 0);
+ break;
+ case PIPE_ISOCHRONOUS:
+ usb_release_bandwidth(urb->dev, urb, 1);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (urb->status != -EINPROGRESS) {
+ spin_unlock(&urb->lock);
+ spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
+ return 0;
+ }
+
+ list_del_init(&urb->urb_list);
+
+ /* Short circuit the virtual root hub */
+ if (urb->dev == xhci->rh.dev) {
+ rh_unlink_urb(urb);
+
+ spin_unlock(&urb->lock);
+ spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
+
+ xhci_call_completion(urb);
+ } else {
+ if (urb->transfer_flags & USB_ASYNC_UNLINK) {
+ /* We don't currently attempt to cancel URBs that have been
+ * queued in the ring.  We handle asynchronously unlinked URBs
+ * when they complete. */
+ urbp->status = urb->status = -ECONNABORTED;
+ spin_unlock(&urb->lock);
+ spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
+ } else {
+ urb->status = -ENOENT;
+
+ spin_unlock(&urb->lock);
+ spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
+
+ if (in_interrupt()) { /* wait at least 1 frame */
+ static int errorcount = 10;
+
+ if (errorcount--)
+ dbg("xhci_unlink_urb called from interrupt for urb %p", urb);
+ udelay(1000);
+ } else
+ schedule_timeout(1+1*HZ/1000);
+
+ xhci_delete_urb(urb);
+
+ xhci_call_completion(urb);
+ }
+ }
+
+ return 0;
+}
+
+static void xhci_call_completion(struct urb *urb)
+{
+ struct urb_priv *urbp;
+ struct usb_device *dev = urb->dev;
+ int is_ring = 0, killed, resubmit_interrupt, status;
+ struct urb *nurb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&urb->lock, flags);
+
+ urbp = (struct urb_priv *)urb->hcpriv;
+ if (!urbp || !urb->dev) {
+ spin_unlock_irqrestore(&urb->lock, flags);
+ return;
+ }
+
+ killed = (urb->status == -ENOENT || urb->status == -ECONNABORTED ||
+ urb->status == -ECONNRESET);
+ resubmit_interrupt = (usb_pipetype(urb->pipe) == PIPE_INTERRUPT &&
+ urb->interval);
+
+ nurb = urb->next;
+ if (nurb && !killed) {
+ int count = 0;
+
+ while (nurb && nurb != urb && count < MAX_URB_LOOP) {
+ if (nurb->status == -ENOENT ||
+ nurb->status == -ECONNABORTED ||
+ nurb->status == -ECONNRESET) {
+ killed = 1;
+ break;
+ }
+
+ nurb = nurb->next;
+ count++;
+ }
+
+ if (count == MAX_URB_LOOP)
+ err("xhci_call_completion: too many linked URB's, loop? (first loop)");
+
+ /* Check to see if chain is a ring */
+ is_ring = (nurb == urb);
+ }
+
+ status = urbp->status;
+ if (!resubmit_interrupt || killed)
+ /* We don't need urb_priv anymore */
+ xhci_destroy_urb_priv(urb);
+
+ if (!killed)
+ urb->status = status;
+
+ spin_unlock_irqrestore(&urb->lock, flags);
+
+ if (urb->complete)
+ urb->complete(urb);
+
+ if (resubmit_interrupt)
+ /* Recheck the status. The completion handler may have */
+ /* unlinked the resubmitting interrupt URB */
+ killed = (urb->status == -ENOENT ||
+ urb->status == -ECONNABORTED ||
+ urb->status == -ECONNRESET);
+
+ if (resubmit_interrupt && !killed) {
+ if ( urb->dev != xhci->rh.dev )
+ xhci_queue_req(urb); /* XXX What if this fails? */
+ /* Don't need to resubmit URBs for the virtual root dev. */
+ } else {
+ if (is_ring && !killed) {
+ urb->dev = dev;
+ xhci_submit_urb(urb);
+ } else {
+ /* We decrement the usage count after we're done */
+ /* with everything */
+ usb_dec_dev_use(dev);
+ }
+ }
+}
+
+static void xhci_finish_completion(void)
+{
+ struct list_head *tmp, *head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xhci->complete_list_lock, flags);
+ head = &xhci->complete_list;
+ tmp = head->next;
+ while (tmp != head) {
+ struct urb_priv *urbp = list_entry(tmp, struct urb_priv,
+ complete_list);
+ struct urb *urb = urbp->urb;
+
+ list_del_init(&urbp->complete_list);
+ spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
+
+ xhci_call_completion(urb);
+
+ spin_lock_irqsave(&xhci->complete_list_lock, flags);
+ head = &xhci->complete_list;
+ tmp = head->next;
+ }
+ spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
+}
+
+static struct usb_operations xhci_device_operations = {
+ .allocate = xhci_do_nothing_dev,
+ .deallocate = xhci_do_nothing_dev,
+ /* It doesn't look like any drivers actually care what the frame number
+ * is at the moment! If necessary, we could approximate the current
+ * frame number by passing it from the backend in response messages. */
+ .get_frame_number = NULL,
+ .submit_urb = xhci_submit_urb,
+ .unlink_urb = xhci_unlink_urb
+};
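+
+/* This operations table is handed to usb_alloc_bus() in alloc_xhci() below;
+ * the USB core calls back through it to submit and unlink URBs. */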
+
+/******************************************************************************
+ * VIRTUAL ROOT HUB EMULATION
+ */
+
+static __u8 root_hub_dev_des[] =
+{
+ 0x12, /* __u8 bLength; */
+ 0x01, /* __u8 bDescriptorType; Device */
+ 0x00, /* __u16 bcdUSB; v1.0 */
+ 0x01,
+ 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
+ 0x00, /* __u8 bDeviceSubClass; */
+ 0x00, /* __u8 bDeviceProtocol; */
+ 0x08, /* __u8 bMaxPacketSize0; 8 Bytes */
+ 0x00, /* __u16 idVendor; */
+ 0x00,
+ 0x00, /* __u16 idProduct; */
+ 0x00,
+ 0x00, /* __u16 bcdDevice; */
+ 0x00,
+ 0x00, /* __u8 iManufacturer; */
+ 0x02, /* __u8 iProduct; */
+ 0x01, /* __u8 iSerialNumber; */
+ 0x01 /* __u8 bNumConfigurations; */
+};
+
+
+/* Configuration descriptor */
+static __u8 root_hub_config_des[] =
+{
+ 0x09, /* __u8 bLength; */
+ 0x02, /* __u8 bDescriptorType; Configuration */
+ 0x19, /* __u16 wTotalLength; */
+ 0x00,
+ 0x01, /* __u8 bNumInterfaces; */
+ 0x01, /* __u8 bConfigurationValue; */
+ 0x00, /* __u8 iConfiguration; */
+ 0x40, /* __u8 bmAttributes;
+ Bit 7: Bus-powered, 6: Self-powered,
+ Bit 5 Remote-wakeup, 4..0: resvd */
+ 0x00, /* __u8 MaxPower; */
+
+ /* interface */
+ 0x09, /* __u8 if_bLength; */
+ 0x04, /* __u8 if_bDescriptorType; Interface */
+ 0x00, /* __u8 if_bInterfaceNumber; */
+ 0x00, /* __u8 if_bAlternateSetting; */
+ 0x01, /* __u8 if_bNumEndpoints; */
+ 0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
+ 0x00, /* __u8 if_bInterfaceSubClass; */
+ 0x00, /* __u8 if_bInterfaceProtocol; */
+ 0x00, /* __u8 if_iInterface; */
+
+ /* endpoint */
+ 0x07, /* __u8 ep_bLength; */
+ 0x05, /* __u8 ep_bDescriptorType; Endpoint */
+ 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
+ 0x03, /* __u8 ep_bmAttributes; Interrupt */
+ 0x08, /* __u16 ep_wMaxPacketSize; 8 Bytes */
+ 0x00,
+ 0xff /* __u8 ep_bInterval; 255 ms */
+};
+
+static __u8 root_hub_hub_des[] =
+{
+ 0x09, /* __u8 bLength; */
+ 0x29, /* __u8 bDescriptorType; Hub-descriptor */
+ 0x02, /* __u8 bNbrPorts; */
+ 0x00, /* __u16 wHubCharacteristics; */
+ 0x00,
+ 0x01, /* __u8 bPwrOn2pwrGood; 2ms */
+ 0x00, /* __u8 bHubContrCurrent; 0 mA */
+ 0x00, /* __u8 DeviceRemovable; *** 7 Ports max *** */
+ 0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */
+};
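+
+/* NB: bNbrPorts (byte 2) is a placeholder; rh_submit_urb() patches in the
+ * real xhci->rh.numports before returning this hub descriptor. */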
+
+/* prepare Interrupt pipe transaction data; HUB INTERRUPT ENDPOINT */
+static int rh_send_irq(struct urb *urb)
+{
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
+ xhci_port_t *ports = xhci->rh.ports;
+ unsigned long flags;
+ int i, len = 1;
+ __u16 data = 0;
+
+ spin_lock_irqsave(&urb->lock, flags);
+ for (i = 0; i < xhci->rh.numports; i++) {
+ /* Set a bit if anything at all has changed on the port, as per
+ * USB spec 11.12 */
+ data |= (ports[i].cs_chg || ports[i].pe_chg )
+ ? (1 << (i + 1))
+ : 0;
+
+ len = (i + 1) / 8 + 1;
+ }
+
+ *(__u16 *) urb->transfer_buffer = cpu_to_le16(data);
+ urb->actual_length = len;
+ urbp->status = 0;
+
+ spin_unlock_irqrestore(&urb->lock, flags);
+
+ if ((data > 0) && (xhci->rh.send != 0)) {
+ dbg("root-hub INT complete: data: %x", data);
+ xhci_call_completion(urb);
+ }
+
+ return 0;
+}
+
+/* Virtual Root Hub INTs are polled by this timer every "interval" ms */
+static int rh_init_int_timer(struct urb *urb);
+
+static void rh_int_timer_do(unsigned long ptr)
+{
+ struct urb *urb = (struct urb *)ptr;
+ struct list_head list, *tmp, *head;
+ unsigned long flags;
+ int i;
+
+ for ( i = 0; i < xhci->rh.numports; i++)
+ xhci_queue_probe(i);
+
+ if (xhci->rh.send)
+ rh_send_irq(urb);
+
+ INIT_LIST_HEAD(&list);
+
+ spin_lock_irqsave(&xhci->urb_list_lock, flags);
+ head = &xhci->urb_list;
+ tmp = head->next;
+ while (tmp != head) {
+ struct urb *u = list_entry(tmp, struct urb, urb_list);
+ struct urb_priv *up = (struct urb_priv *)u->hcpriv;
+
+ tmp = tmp->next;
+
+ spin_lock(&u->lock);
+
+ /* Check if the URB timed out */
+ if (u->timeout && time_after_eq(jiffies,
+ up->inserttime + u->timeout)) {
+ list_del(&u->urb_list);
+ list_add_tail(&u->urb_list, &list);
+ }
+
+ spin_unlock(&u->lock);
+ }
+ spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
+
+ head = &list;
+ tmp = head->next;
+ while (tmp != head) {
+ struct urb *u = list_entry(tmp, struct urb, urb_list);
+
+ tmp = tmp->next;
+
+ u->transfer_flags |= USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED;
+ xhci_unlink_urb(u);
+ }
+
+ rh_init_int_timer(urb);
+}
+
+/* Root Hub INTs are polled by this timer */
+static int rh_init_int_timer(struct urb *urb)
+{
+ xhci->rh.interval = urb->interval;
+ init_timer(&xhci->rh.rh_int_timer);
+ xhci->rh.rh_int_timer.function = rh_int_timer_do;
+ xhci->rh.rh_int_timer.data = (unsigned long)urb;
+ xhci->rh.rh_int_timer.expires = jiffies
+ + (HZ * (urb->interval < 30 ? 30 : urb->interval)) / 1000;
+ add_timer(&xhci->rh.rh_int_timer);
+
+ return 0;
+}
+
+#define OK(x) len = (x); break
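+
+/* OK(x) records the reply length and drops out of the request switch in
+ * rh_submit_urb() below. */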
+
+/* Root Hub Control Pipe */
+static int rh_submit_urb(struct urb *urb)
+{
+ unsigned int pipe = urb->pipe;
+ struct usb_ctrlrequest *cmd =
+ (struct usb_ctrlrequest *)urb->setup_packet;
+ void *data = urb->transfer_buffer;
+ int leni = urb->transfer_buffer_length;
+ int len = 0;
+ xhci_port_t *status;
+ int stat = 0;
+ int i;
+ int retstatus;
+ unsigned long flags;
+
+ __u16 cstatus;
+ __u16 bmRType_bReq;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
+
+ if (usb_pipetype(pipe) == PIPE_INTERRUPT) {
+ xhci->rh.urb = urb;
+ xhci->rh.send = 1;
+ xhci->rh.interval = urb->interval;
+ rh_init_int_timer(urb);
+
+ return -EINPROGRESS;
+ }
+
+ bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
+ wValue = le16_to_cpu(cmd->wValue);
+ wIndex = le16_to_cpu(cmd->wIndex);
+ wLength = le16_to_cpu(cmd->wLength);
+
+ for (i = 0; i < 8; i++)
+ xhci->rh.c_p_r[i] = 0;
+
+ status = &xhci->rh.ports[wIndex - 1];
+
+ spin_lock_irqsave(&xhci->rh.port_state_lock, flags);
+
+ switch (bmRType_bReq) {
+ /* Request Destination:
+ without flags: Device,
+ RH_INTERFACE: interface,
+ RH_ENDPOINT: endpoint,
+ RH_CLASS means HUB here,
+ RH_OTHER | RH_CLASS almost always means HUB_PORT here
+ */
+
+ case RH_GET_STATUS:
+ *(__u16 *)data = cpu_to_le16(1);
+ OK(2);
+ case RH_GET_STATUS | RH_INTERFACE:
+ *(__u16 *)data = cpu_to_le16(0);
+ OK(2);
+ case RH_GET_STATUS | RH_ENDPOINT:
+ *(__u16 *)data = cpu_to_le16(0);
+ OK(2);
+ case RH_GET_STATUS | RH_CLASS:
+ *(__u32 *)data = cpu_to_le32(0);
+ OK(4); /* hub power */
+ case RH_GET_STATUS | RH_OTHER | RH_CLASS:
+ cstatus = (status->cs_chg) |
+ (status->pe_chg << 1) |
+ (xhci->rh.c_p_r[wIndex - 1] << 4);
+ retstatus = (status->cs) |
+ (status->pe << 1) |
+ (status->susp << 2) |
+ (1 << 8) | /* power on */
+ (status->lsda << 9);
+ *(__u16 *)data = cpu_to_le16(retstatus);
+ *(__u16 *)(data + 2) = cpu_to_le16(cstatus);
+ OK(4);
+ case RH_CLEAR_FEATURE | RH_ENDPOINT:
+ switch (wValue) {
+ case RH_ENDPOINT_STALL:
+ OK(0);
+ }
+ break;
+ case RH_CLEAR_FEATURE | RH_CLASS:
+ switch (wValue) {
+ case RH_C_HUB_OVER_CURRENT:
+ OK(0); /* hub power over current */
+ }
+ break;
+ case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
+ switch (wValue) {
+ case RH_PORT_ENABLE:
+ status->pe = 0;
+ OK(0);
+ case RH_PORT_SUSPEND:
+ status->susp = 0;
+ OK(0);
+ case RH_PORT_POWER:
+ OK(0); /* port power */
+ case RH_C_PORT_CONNECTION:
+ status->cs_chg = 0;
+ OK(0);
+ case RH_C_PORT_ENABLE:
+ status->pe_chg = 0;
+ OK(0);
+ case RH_C_PORT_SUSPEND:
+ /*** WR_RH_PORTSTAT(RH_PS_PSSC); */
+ OK(0);
+ case RH_C_PORT_OVER_CURRENT:
+ OK(0); /* port power over current */
+ case RH_C_PORT_RESET:
+ xhci->rh.c_p_r[wIndex - 1] = 0;
+ OK(0);
+ }
+ break;
+ case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
+ switch (wValue) {
+ case RH_PORT_SUSPEND:
+ status->susp = 1;
+ OK(0);
+ case RH_PORT_RESET:
+ {
+ int ret;
+ xhci->rh.c_p_r[wIndex - 1] = 1;
+ status->pr = 0;
+ status->pe = 1;
+ ret = xhci_port_reset(wIndex - 1);
+ /* XXX MAW: should probably cancel queued transfers during reset. */
+ if ( ret == 0 ) { OK(0); }
+ else { return ret; }
+ }
+ break;
+ case RH_PORT_POWER:
+ OK(0); /* port power ** */
+ case RH_PORT_ENABLE:
+ status->pe = 1;
+ OK(0);
+ }
+ break;
+ case RH_SET_ADDRESS:
+ xhci->rh.devnum = wValue;
+ OK(0);
+ case RH_GET_DESCRIPTOR:
+ switch ((wValue & 0xff00) >> 8) {
+ case 0x01: /* device descriptor */
+ len = min_t(unsigned int, leni,
+ min_t(unsigned int,
+ sizeof(root_hub_dev_des), wLength));
+ memcpy(data, root_hub_dev_des, len);
+ OK(len);
+ case 0x02: /* configuration descriptor */
+ len = min_t(unsigned int, leni,
+ min_t(unsigned int,
+ sizeof(root_hub_config_des), wLength));
+ memcpy (data, root_hub_config_des, len);
+ OK(len);
+ case 0x03: /* string descriptors */
+ len = usb_root_hub_string (wValue & 0xff,
+ 0, "XHCI-alt",
+ data, wLength);
+ if (len > 0) {
+ OK(min_t(int, leni, len));
+ } else
+ stat = -EPIPE;
+ }
+ break;
+ case RH_GET_DESCRIPTOR | RH_CLASS:
+ root_hub_hub_des[2] = xhci->rh.numports;
+ len = min_t(unsigned int, leni,
+ min_t(unsigned int, sizeof(root_hub_hub_des), wLength));
+ memcpy(data, root_hub_hub_des, len);
+ OK(len);
+ case RH_GET_CONFIGURATION:
+ *(__u8 *)data = 0x01;
+ OK(1);
+ case RH_SET_CONFIGURATION:
+ OK(0);
+ case RH_GET_INTERFACE | RH_INTERFACE:
+ *(__u8 *)data = 0x00;
+ OK(1);
+ case RH_SET_INTERFACE | RH_INTERFACE:
+ OK(0);
+ default:
+ stat = -EPIPE;
+ }
+
+ spin_unlock_irqrestore(&xhci->rh.port_state_lock, flags);
+
+ urb->actual_length = len;
+
+ return stat;
+}
+
+/*
+ * MUST be called with urb->lock acquired
+ */
+static int rh_unlink_urb(struct urb *urb)
+{
+ if (xhci->rh.urb == urb) {
+ urb->status = -ENOENT;
+ xhci->rh.send = 0;
+ xhci->rh.urb = NULL;
+ del_timer(&xhci->rh.rh_int_timer);
+ }
+ return 0;
+}
+
+/******************************************************************************
+ * CONTROL PLANE FUNCTIONALITY
+ */
+
+/**
+ * alloc_xhci - initialise a new virtual root hub for a new USB device channel
+ */
+static int alloc_xhci(void)
+{
+ int retval;
+ struct usb_bus *bus;
+
+ retval = -EBUSY;
+
+ xhci = kmalloc(sizeof(*xhci), GFP_KERNEL);
+ if (!xhci) {
+ err("couldn't allocate xhci structure");
+ retval = -ENOMEM;
+ goto err_alloc_xhci;
+ }
+
+ xhci->state = USBIF_STATE_CLOSED;
+
+ spin_lock_init(&xhci->urb_list_lock);
+ INIT_LIST_HEAD(&xhci->urb_list);
+
+ spin_lock_init(&xhci->complete_list_lock);
+ INIT_LIST_HEAD(&xhci->complete_list);
+
+ spin_lock_init(&xhci->frame_list_lock);
+
+ bus = usb_alloc_bus(&xhci_device_operations);
+
+ if (!bus) {
+ err("unable to allocate bus");
+ goto err_alloc_bus;
+ }
+
+ xhci->bus = bus;
+ bus->bus_name = "XHCI";
+ bus->hcpriv = xhci;
+
+ usb_register_bus(xhci->bus);
+
+ /* Initialize the root hub */
+
+ xhci->rh.numports = 0;
+
+ xhci->bus->root_hub = xhci->rh.dev = usb_alloc_dev(NULL, xhci->bus);
+ if (!xhci->rh.dev) {
+ err("unable to allocate root hub");
+ goto err_alloc_root_hub;
+ }
+
+ xhci->state = 0;
+
+ return 0;
+
+/*
+ * error exits:
+ */
+err_alloc_root_hub:
+ usb_deregister_bus(xhci->bus);
+ usb_free_bus(xhci->bus);
+ xhci->bus = NULL;
+
+err_alloc_bus:
+ kfree(xhci);
+
+err_alloc_xhci:
+ return retval;
+}
+
+/**
+ * usbif_status_change - deal with an incoming USB_INTERFACE_STATUS_ message
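+ *
+ * The interface moves CLOSED -> DISCONNECTED (allocate the shared ring and
+ * send an interface-CONNECT message) -> CONNECTED (bind the event channel
+ * and start the root hub) as status messages arrive from the backend.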
+ */
+static void usbif_status_change(usbif_fe_interface_status_changed_t *status)
+{
+ ctrl_msg_t cmsg;
+ usbif_fe_interface_connect_t up;
+ long rc;
+ usbif_sring_t *sring;
+
+ switch ( status->status )
+ {
+ case USBIF_INTERFACE_STATUS_DESTROYED:
+ printk(KERN_WARNING "Unexpected usbif-DESTROYED message in state %d\n",
+ xhci->state);
+ break;
+
+ case USBIF_INTERFACE_STATUS_DISCONNECTED:
+ if ( xhci->state != USBIF_STATE_CLOSED )
+ {
+ printk(KERN_WARNING "Unexpected usbif-DISCONNECTED message"
+ " in state %d\n", xhci->state);
+ break;
+ /* Not bothering to do recovery here for now. Keep things
+ * simple. */
+
+ spin_lock_irq(&xhci->ring_lock);
+
+ /* Clean up resources. */
+ free_page((unsigned long)xhci->usb_ring.sring);
+ free_irq(xhci->irq, xhci);
+ unbind_evtchn_from_irq(xhci->evtchn);
+
+ /* Plug the ring. */
+ xhci->recovery = 1;
+ wmb();
+
+ spin_unlock_irq(&xhci->ring_lock);
+ }
+
+ /* Move from CLOSED to DISCONNECTED state. */
+ sring = (usbif_sring_t *)__get_free_page(GFP_KERNEL);
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&xhci->usb_ring, sring, PAGE_SIZE);
+ xhci->state = USBIF_STATE_DISCONNECTED;
+
+ /* Construct an interface-CONNECT message for the domain controller. */
+ cmsg.type = CMSG_USBIF_FE;
+ cmsg.subtype = CMSG_USBIF_FE_INTERFACE_CONNECT;
+ cmsg.length = sizeof(usbif_fe_interface_connect_t);
+ up.shmem_frame = virt_to_machine(sring) >> PAGE_SHIFT;
+ memcpy(cmsg.msg, &up, sizeof(up));
+
+ /* Tell the controller to bring up the interface. */
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+ break;
+
+ case USBIF_INTERFACE_STATUS_CONNECTED:
+ if ( xhci->state == USBIF_STATE_CLOSED )
+ {
+ printk(KERN_WARNING "Unexpected usbif-CONNECTED message"
+ " in state %d\n", xhci->state);
+ break;
+ }
+
+ xhci->evtchn = status->evtchn;
+ xhci->irq = bind_evtchn_to_irq(xhci->evtchn);
+ xhci->bandwidth = status->bandwidth;
+ xhci->rh.numports = status->num_ports;
+
+ xhci->rh.ports = kmalloc (sizeof(xhci_port_t) * xhci->rh.numports, GFP_KERNEL);
+
+ if ( xhci->rh.ports == NULL )
+ goto alloc_ports_nomem;
+
+ memset(xhci->rh.ports, 0, sizeof(xhci_port_t) * xhci->rh.numports);
+
+ usb_connect(xhci->rh.dev);
+
+ if (usb_new_device(xhci->rh.dev) != 0) {
+ err("unable to start root hub");
+ }
+
+ /* Allocate the appropriate USB bandwidth here... Need to
+ * somehow know what the total available is thought to be so we
+ * can calculate the reservation correctly. */
+ usb_claim_bandwidth(xhci->rh.dev, xhci->rh.urb,
+ 1000 - xhci->bandwidth, 0);
+
+ if ( (rc = request_irq(xhci->irq, xhci_interrupt,
+ SA_SAMPLE_RANDOM, "usbif", xhci)) )
+ printk(KERN_ALERT"usbfront request_irq failed (%ld)\n",rc);
+
+ DPRINTK(KERN_INFO __FILE__
+ ": USB XHCI: SHM at %p (0x%lx), EVTCHN %d IRQ %d\n",
+ xhci->usb_ring.sring, virt_to_machine(xhci->usb_ring.sring),
+ xhci->evtchn, xhci->irq);
+
+ xhci->state = USBIF_STATE_CONNECTED;
+
+ break;
+
+ default:
+ printk(KERN_WARNING "Status change to unknown value %d\n",
+ status->status);
+ break;
+ }
+
+ return;
+
+ alloc_ports_nomem:
+ printk(KERN_WARNING "Failed to allocate port memory, XHCI failed to connect.\n");
+ return;
+}
+
+/**
+ * usbif_ctrlif_rx - demux control messages by subtype
+ */
+static void usbif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ switch ( msg->subtype )
+ {
+ case CMSG_USBIF_FE_INTERFACE_STATUS_CHANGED:
+ usbif_status_change((usbif_fe_interface_status_changed_t *)
+ &msg->msg[0]);
+ break;
+
+ /* New interface...? */
+ default:
+ msg->length = 0;
+ break;
+ }
+
+ ctrl_if_send_response(msg);
+}
+
+static void send_driver_up(void)
+{
+ ctrl_msg_t cmsg;
+ usbif_fe_driver_status_changed_t st;
+
+ /* Send a driver-UP notification to the domain controller. */
+ cmsg.type = CMSG_USBIF_FE;
+ cmsg.subtype = CMSG_USBIF_FE_DRIVER_STATUS_CHANGED;
+ cmsg.length = sizeof(usbif_fe_driver_status_changed_t);
+ st.status = USBIF_DRIVER_STATUS_UP;
+ memcpy(cmsg.msg, &st, sizeof(st));
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
+
+void usbif_resume(void)
+{
+ int i;
+
+ /* Fake disconnection on all virtual USB ports (suspending / migrating
+ * will destroy hard state associated with the USB devices anyhow). */
+ /* No need to lock here. */
+ for ( i = 0; i < xhci->rh.numports; i++ )
+ {
+ xhci->rh.ports[i].cs = 0;
+ xhci->rh.ports[i].cs_chg = 1;
+ xhci->rh.ports[i].pe = 0;
+ }
+
+ send_driver_up();
+}
+
+static int __init xhci_hcd_init(void)
+{
+ int retval = -ENOMEM, i;
+
+ if ( (xen_start_info.flags & SIF_INITDOMAIN)
+ || (xen_start_info.flags & SIF_USB_BE_DOMAIN) )
+ return 0;
+
+ info(DRIVER_DESC " " DRIVER_VERSION);
+
+ if (debug) {
+ errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
+ if (!errbuf)
+ goto errbuf_failed;
+ }
+
+ xhci_up_cachep = kmem_cache_create("xhci_urb_priv",
+ sizeof(struct urb_priv), 0, 0, NULL, NULL);
+ if (!xhci_up_cachep)
+ goto up_failed;
+
+ /* Let the domain controller know we're here. For now we wait until
+ * connection, as for the block and net drivers. This is only strictly
+ * necessary if we're going to boot off a USB device. */
+ printk(KERN_INFO "Initialising Xen virtual USB hub\n");
+
+ (void)ctrl_if_register_receiver(CMSG_USBIF_FE, usbif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+
+ alloc_xhci();
+
+ send_driver_up();
+
+ /*
+ * We should read 'nr_interfaces' from response message and wait
+ * for notifications before proceeding. For now we assume that we
+ * will be notified of exactly one interface.
+ */
+ for ( i=0; (xhci->state != USBIF_STATE_CONNECTED) && (i < 10*HZ); i++ )
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ if (xhci->state != USBIF_STATE_CONNECTED)
+ printk(KERN_WARNING "Timeout connecting USB frontend driver!\n");
+
+ return 0;
+
+up_failed:
+ if (errbuf)
+ kfree(errbuf);
+
+errbuf_failed:
+ return retval;
+}
+
+module_init(xhci_hcd_init);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/usbfront/xhci.h b/linux-2.6.11-xen-sparse/drivers/xen/usbfront/xhci.h
new file mode 100644
index 0000000000..b42e860e2c
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/drivers/xen/usbfront/xhci.h
@@ -0,0 +1,183 @@
+/******************************************************************************
+ * xhci.h
+ *
+ * Private definitions for the Xen Virtual USB Controller. Based on
+ * drivers/usb/host/uhci.h from Linux. Copyright for the imported content is
+ * retained by the original authors.
+ *
+ * Modifications are:
+ * Copyright (C) 2004 Intel Research Cambridge
+ * Copyright (C) 2004, 2005 Mark Williamson
+ */
+
+#ifndef __LINUX_XHCI_H
+#define __LINUX_XHCI_H
+
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <asm-xen/xen-public/io/usbif.h>
+#include <linux/spinlock.h>
+
+/* xhci_port_t - current known state of a virtual hub port */
+typedef struct {
+ unsigned int cs :1; /* Connection status. */
+ unsigned int cs_chg :1; /* Connection status change. */
+ unsigned int pe :1; /* Port enable. */
+ unsigned int pe_chg :1; /* Port enable change. */
+ unsigned int susp :1; /* Suspended. */
+ unsigned int lsda :1; /* Low speed device attached. */
+ unsigned int pr :1; /* Port reset. */
+} xhci_port_t;
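+
+/* These bits mirror the USB wPortStatus / wPortChange fields; rh_submit_urb()
+ * assembles GetPortStatus replies for the virtual root hub directly from
+ * them. */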
+
+/* struct virt_root_hub - state related to the virtual root hub */
+struct virt_root_hub {
+ struct usb_device *dev;
+ int devnum; /* Address of Root Hub endpoint */
+ struct urb *urb;
+ void *int_addr;
+ int send;
+ int interval;
+ int numports;
+ int c_p_r[8];
+ struct timer_list rh_int_timer;
+ spinlock_t port_state_lock;
+ xhci_port_t *ports;
+};
+
+/* struct xhci - contains the state associated with a single USB interface */
+struct xhci {
+
+#ifdef CONFIG_PROC_FS
+ /* procfs */
+ int num;
+ struct proc_dir_entry *proc_entry;
+#endif
+
+ int evtchn; /* Interdom channel to backend */
+ int irq; /* Bound to evtchn */
+ enum {
+ USBIF_STATE_CONNECTED = 2,
+ USBIF_STATE_DISCONNECTED = 1,
+ USBIF_STATE_CLOSED = 0
+ } state; /* State of this USB interface */
+ unsigned long recovery; /* boolean recovery in progress flag */
+
+ unsigned long bandwidth;
+
+ struct usb_bus *bus;
+
+ /* Main list of URB's currently controlled by this HC */
+ spinlock_t urb_list_lock;
+ struct list_head urb_list; /* P: xhci->urb_list_lock */
+
+ /* List of URB's awaiting completion callback */
+ spinlock_t complete_list_lock;
+ struct list_head complete_list; /* P: xhci->complete_list_lock */
+
+ struct virt_root_hub rh; /* private data of the virtual root hub */
+
+ spinlock_t ring_lock;
+ usbif_front_ring_t usb_ring;
+
+ int awaiting_reset;
+};
+
+/* per-URB private data structure for the host controller */
+struct urb_priv {
+ struct urb *urb;
+ usbif_iso_t *schedule;
+ struct usb_device *dev;
+
+ int in_progress : 1; /* QH was queued (not linked in) */
+ int short_control_packet : 1; /* If we get a short packet during */
+ /* a control transfer, retrigger */
+ /* the status phase */
+
+ int status; /* Final status */
+
+ unsigned long inserttime; /* In jiffies */
+
+ struct list_head complete_list; /* P: xhci->complete_list_lock */
+};
+
+/*
+ * Locking in xhci.c
+ *
+ * spinlocks are used extensively to protect the many lists and data
+ * structures we have. It's not that pretty, but it's necessary. We
+ * need to be done with all of the locks (except complete_list_lock) when
+ * we call urb->complete. I've tried to make it simple enough so I don't
+ * have to spend hours racking my brain trying to figure out if the
+ * locking is safe.
+ *
+ * Here's the safe locking order to prevent deadlocks:
+ *
+ * #1 xhci->urb_list_lock
+ * #2 urb->lock
+ * #3 xhci->urb_remove_list_lock
+ * #4 xhci->complete_list_lock
+ *
+ * If you're going to grab 2 or more locks at once, ALWAYS grab the lock
+ * at the lowest level FIRST and NEVER grab locks at the same level at the
+ * same time.
+ *
+ * So, if you need xhci->urb_list_lock, grab it before you grab urb->lock
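+ *
+ * For example, the submit path (xhci_submit_urb) nests them as:
+ *
+ *     spin_lock_irqsave(&xhci->urb_list_lock, flags);
+ *     spin_lock(&urb->lock);
+ *     ...
+ *     spin_unlock(&urb->lock);
+ *     spin_unlock_irqrestore(&xhci->urb_list_lock, flags);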
+ */
+
+/* -------------------------------------------------------------------------
+ Virtual Root HUB
+ ------------------------------------------------------------------------- */
+/* destination of request */
+#define RH_DEVICE 0x00
+#define RH_INTERFACE 0x01
+#define RH_ENDPOINT 0x02
+#define RH_OTHER 0x03
+
+#define RH_CLASS 0x20
+#define RH_VENDOR 0x40
+
+/* Requests: bRequest << 8 | bmRequestType */
+#define RH_GET_STATUS 0x0080
+#define RH_CLEAR_FEATURE 0x0100
+#define RH_SET_FEATURE 0x0300
+#define RH_SET_ADDRESS 0x0500
+#define RH_GET_DESCRIPTOR 0x0680
+#define RH_SET_DESCRIPTOR 0x0700
+#define RH_GET_CONFIGURATION 0x0880
+#define RH_SET_CONFIGURATION 0x0900
+#define RH_GET_STATE 0x0280
+#define RH_GET_INTERFACE 0x0A80
+#define RH_SET_INTERFACE 0x0B00
+#define RH_SYNC_FRAME 0x0C80
+/* Our Vendor Specific Request */
+#define RH_SET_EP 0x2000
+
+/* Hub port features */
+#define RH_PORT_CONNECTION 0x00
+#define RH_PORT_ENABLE 0x01
+#define RH_PORT_SUSPEND 0x02
+#define RH_PORT_OVER_CURRENT 0x03
+#define RH_PORT_RESET 0x04
+#define RH_PORT_POWER 0x08
+#define RH_PORT_LOW_SPEED 0x09
+#define RH_C_PORT_CONNECTION 0x10
+#define RH_C_PORT_ENABLE 0x11
+#define RH_C_PORT_SUSPEND 0x12
+#define RH_C_PORT_OVER_CURRENT 0x13
+#define RH_C_PORT_RESET 0x14
+
+/* Hub features */
+#define RH_C_HUB_LOCAL_POWER 0x00
+#define RH_C_HUB_OVER_CURRENT 0x01
+#define RH_DEVICE_REMOTE_WAKEUP 0x00
+#define RH_ENDPOINT_STALL 0x01
+
+/* Our Vendor Specific feature */
+#define RH_REMOVE_EP 0x00
+
+#define RH_ACK 0x01
+#define RH_REQ_ERR -1
+#define RH_NACK 0x00
+
+#endif
+
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/desc.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/desc.h
index af9c5b2a9e..85f022109c 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/desc.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/desc.h
@@ -103,7 +103,7 @@ static inline void clear_LDT(void)
* it slows down context switching. Noone uses it anyway.
*/
cpu = cpu; /* XXX avoid compiler warning */
- queue_set_ldt(0UL, 0);
+ xen_set_ldt(0UL, 0);
put_cpu();
}
@@ -118,14 +118,13 @@ static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
if (likely(!count))
segments = NULL;
- queue_set_ldt((unsigned long)segments, count);
+ xen_set_ldt((unsigned long)segments, count);
}
static inline void load_LDT(mm_context_t *pc)
{
int cpu = get_cpu();
load_LDT_nolock(pc, cpu);
- flush_page_update_queue();
put_cpu();
}
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h
index ee7e4aeeba..2bd859ff5f 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h
@@ -27,6 +27,7 @@
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
+#include <asm-xen/gnttab.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
@@ -79,11 +80,14 @@ enum fixed_addresses {
#ifdef CONFIG_ACPI_BOOT
FIX_ACPI_BEGIN,
FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
+ FIX_ACPI_RSDP_PAGE,
#endif
#ifdef CONFIG_PCI_MMCONFIG
FIX_PCIE_MCFG,
#endif
FIX_SHARED_INFO,
+ FIX_GNTTAB_BEGIN,
+ FIX_GNTTAB_END = FIX_GNTTAB_BEGIN + NR_GRANT_FRAMES - 1,
#ifdef CONFIG_XEN_PHYSDEV_ACCESS
#define NR_FIX_ISAMAPS 256
FIX_ISAMAP_END,
@@ -100,15 +104,9 @@ enum fixed_addresses {
extern void __set_fixmap (enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
-extern void __set_fixmap_ma (enum fixed_addresses idx,
- unsigned long mach, pgprot_t flags);
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
-#define set_fixmap_ma(idx, phys) \
- __set_fixmap_ma(idx, phys, PAGE_KERNEL)
-#define set_fixmap_ma_ro(idx, phys) \
- __set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
/*
* Some hardware wants to get fixmapped without caching.
*/
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h
new file mode 100644
index 0000000000..13ab9c3fde
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h
@@ -0,0 +1,520 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERCALL_H__
+#define __HYPERCALL_H__
+#include <asm-xen/xen-public/xen.h>
+
+/*
+ * Assembler stubs for hyper-calls.
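+ *
+ * Calling convention: the hypercall index goes in %eax and up to five
+ * arguments in %ebx, %ecx, %edx, %esi and %edi; TRAP_INSTR is the trap
+ * into the hypervisor (historically int $0x82 on i386 Xen).  The dummy
+ * "ign" outputs tell gcc that the argument registers are clobbered, so
+ * it does not cache values across the call.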
+ */
+
+static inline int
+HYPERVISOR_set_trap_table(
+ trap_info_t *table)
+{
+ int ret;
+ unsigned long ignore;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_set_trap_table), "1" (table)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_mmu_update(
+ mmu_update_t *req, int count, int *success_count, domid_t domid)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
+ "3" (success_count), "4" (domid)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_mmuext_op(
+ struct mmuext_op *op, int count, int *success_count, domid_t domid)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_mmuext_op), "1" (op), "2" (count),
+ "3" (success_count), "4" (domid)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_gdt(
+ unsigned long *frame_list, int entries)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_stack_switch(
+ unsigned long ss, unsigned long esp)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+ unsigned long event_selector, unsigned long event_address,
+ unsigned long failsafe_selector, unsigned long failsafe_address)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
+ "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+ int set)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_yield(
+ void)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_yield)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_block(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_block)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_shutdown(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_reboot(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_suspend(
+ unsigned long srec)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ /* NB. On suspend, control software expects a suspend record in %esi. */
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=S" (ign2)
+ : "0" (__HYPERVISOR_sched_op),
+ "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)),
+ "S" (srec) : "memory");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_crash(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_crash << SCHEDOP_reasonshift))
+ : "memory" );
+
+ return ret;
+}
+
+static inline long
+HYPERVISOR_set_timer_op(
+ u64 timeout)
+{
+ int ret;
+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
+ unsigned long timeout_lo = (unsigned long)timeout;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_lo), "c" (timeout_hi)
+ : "memory");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_dom0_op(
+ dom0_op_t *dom0_op)
+{
+ int ret;
+ unsigned long ign1;
+
+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
+ : "memory");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_debugreg(
+ int reg, unsigned long value)
+{
+ int ret;
+ unsigned long ign1, ign2;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
+ : "memory" );
+
+ return ret;
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+ int reg)
+{
+ unsigned long ret;
+ unsigned long ign;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_get_debugreg), "1" (reg)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+ unsigned long ma, unsigned long word1, unsigned long word2)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
+ "3" (word2)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_dom_mem_op(
+ unsigned int op, unsigned long *extent_list,
+ unsigned long nr_extents, unsigned int extent_order)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4, ign5;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
+ "=D" (ign5)
+ : "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
+ "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_multicall(
+ void *call_list, int nr_calls)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(
+ unsigned long va, pte_t new_val, unsigned long flags)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_update_va_mapping),
+ "1" (va), "2" ((new_val).pte_low), "3" (flags)
+ : "memory" );
+
+ if ( unlikely(ret < 0) )
+ {
+ printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
+ va, (new_val).pte_low, flags);
+ BUG();
+ }
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_event_channel_op(
+ void *op)
+{
+ int ret;
+ unsigned long ignore;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_event_channel_op), "1" (op)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_xen_version(
+ int cmd)
+{
+ int ret;
+ unsigned long ignore;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_xen_version), "1" (cmd)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_console_io(
+ int cmd, int count, char *str)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_console_io), "1" (cmd), "2" (count), "3" (str)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_physdev_op(
+ void *physdev_op)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_grant_table_op(
+ unsigned int cmd, void *uop, unsigned int count)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_grant_table_op), "1" (cmd), "2" (uop), "3" (count)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
+ "1" (va), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
+ "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_vm_assist(
+ unsigned int cmd, unsigned int type)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_boot_vcpu(
+ unsigned long vcpu, vcpu_guest_context_t *ctxt)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_boot_vcpu), "1" (vcpu), "2" (ctxt)
+ : "memory");
+
+ return ret;
+}
+
+#endif /* __HYPERCALL_H__ */
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
index 30f4d88b62..88c5faaf84 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
@@ -34,6 +34,7 @@
* Vectors 0x20-0x2f are used for ISA interrupts.
*/
+#if 0
/*
* Special IRQ vectors used by the SMP architecture, 0xf0-0xff
*
@@ -56,6 +57,10 @@
* sources per level' errata.
*/
#define LOCAL_TIMER_VECTOR 0xef
+#endif
+
+#define SPURIOUS_APIC_VECTOR 0xff
+#define ERROR_APIC_VECTOR 0xfe
/*
* First APIC vector available to drivers: (vectors 0x30-0xee)
@@ -65,8 +70,6 @@
#define FIRST_DEVICE_VECTOR 0x31
#define FIRST_SYSTEM_VECTOR 0xef
-#define TIMER_IRQ timer_irq
-
/*
* 16 8259A IRQ's, 208 potential APIC interrupt sources.
* Right now the APIC is mostly only used for SMP.
@@ -77,14 +80,18 @@
* the usable vector space is 0x20-0xff (224 vectors)
*/
-#if 0
+#define NR_IPIS 8
+
+#define RESCHEDULE_VECTOR 1
+#define INVALIDATE_TLB_VECTOR 2
+#define CALL_FUNCTION_VECTOR 3
+
/*
* The maximum number of vectors supported by i386 processors
* is limited to 256. For processors other than i386, NR_VECTORS
* should be changed accordingly.
*/
#define NR_VECTORS 256
-#endif
#define FPU_IRQ 13
@@ -103,10 +110,10 @@
*/
#define PIRQ_BASE 0
-#define NR_PIRQS 128
+#define NR_PIRQS 256
#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-#define NR_DYNIRQS 128
+#define NR_DYNIRQS 256
#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
#define NR_IRQ_VECTORS NR_IRQS
@@ -121,6 +128,8 @@
/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
extern int bind_virq_to_irq(int virq);
extern void unbind_virq_from_irq(int virq);
+extern int bind_ipi_on_cpu_to_irq(int cpu, int ipi);
+extern void unbind_ipi_on_cpu_from_irq(int cpu, int ipi);
extern int bind_evtchn_to_irq(int evtchn);
extern void unbind_evtchn_from_irq(int evtchn);
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
index 2bfd4df069..78886d339c 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
@@ -13,7 +13,12 @@ static char * __init machine_specific_memory_setup(void)
who = "Xen";
- start_pfn = 0;
+ /* In dom0, we have to start the fake e820 map above the first
+ * 1MB, in other domains, it can start at 0. */
+ if (xen_start_info.flags & SIF_INITDOMAIN)
+ start_pfn = 0x100;
+ else
+ start_pfn = 0;
max_pfn = xen_start_info.nr_pages;
e820.nr_map = 0;
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/smpboot_hooks.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/smpboot_hooks.h
new file mode 100644
index 0000000000..28adeaf244
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/smpboot_hooks.h
@@ -0,0 +1,55 @@
+/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
+ * which needs to alter them. */
+
+static inline void smpboot_clear_io_apic_irqs(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+ io_apic_irqs = 0;
+#endif
+}
+
+static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+{
+#if 1
+ printk("smpboot_setup_warm_reset_vector\n");
+#else
+ CMOS_WRITE(0xa, 0xf);
+ local_flush_tlb();
+ Dprintk("1.\n");
+ *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
+ Dprintk("2.\n");
+ *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
+ Dprintk("3.\n");
+#endif
+}
+
+static inline void smpboot_restore_warm_reset_vector(void)
+{
+ /*
+ * Install writable page 0 entry to set BIOS data area.
+ */
+ local_flush_tlb();
+
+ /*
+ * Paranoid: Set warm reset code and vector here back
+ * to default values.
+ */
+ CMOS_WRITE(0, 0xf);
+
+ *((volatile long *) phys_to_virt(0x467)) = 0;
+}
+
+static inline void smpboot_setup_io_apic(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+ /*
+ * Here we can be sure that there is an IO-APIC in the system. Let's
+ * go and set it up:
+ */
+ if (!skip_ioapic_setup && nr_ioapics)
+ setup_IO_APIC();
+#endif
+}
+
+
+#define smp_found_config (HYPERVISOR_shared_info->n_vcpu > 1)
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu.h
new file mode 100644
index 0000000000..0e27763469
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu.h
@@ -0,0 +1,22 @@
+#ifndef __i386_MMU_H
+#define __i386_MMU_H
+
+#include <asm/semaphore.h>
+/*
+ * The i386 doesn't have a mmu context, but
+ * we put the segment information here.
+ *
+ * cpu_vm_mask is used to optimize ldt flushing.
+ */
+typedef struct {
+ int size;
+ struct semaphore sem;
+ void *ldt;
+ unsigned pinned:1;
+} mm_context_t;
+
+/* mm/memory.c:exit_mmap hook */
+extern void _arch_exit_mmap(struct mm_struct *mm);
+#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
+
+#endif
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
index a815fad09d..b1fe2fb594 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
@@ -16,38 +16,73 @@ void destroy_context(struct mm_struct *mm);
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
-#ifdef CONFIG_SMP
+#if 0 /* XEN: no lazy tlb */
unsigned cpu = smp_processor_id();
if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}
+#define prepare_arch_switch(rq,next) __prepare_arch_switch()
+#define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
+#define task_running(rq, p) ((rq)->curr == (p))
+
+static inline void __prepare_arch_switch(void)
+{
+ /*
+ * Save away %fs and %gs. No need to save %es and %ds, as those
+ * are always kernel segments while inside the kernel. Must
+ * happen before reload of cr3/ldt (i.e., not in __switch_to).
+ */
+ __asm__ __volatile__ ( "movl %%fs,%0 ; movl %%gs,%1"
+ : "=m" (*(int *)&current->thread.fs),
+ "=m" (*(int *)&current->thread.gs));
+ __asm__ __volatile__ ( "movl %0,%%fs ; movl %0,%%gs"
+ : : "r" (0) );
+}
+
+extern void mm_pin(struct mm_struct *mm);
+extern void mm_unpin(struct mm_struct *mm);
+
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
{
int cpu = smp_processor_id();
+ struct mmuext_op _op[2], *op = _op;
if (likely(prev != next)) {
+ if (!next->context.pinned)
+ mm_pin(next);
+
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
+#if 0 /* XEN: no lazy tlb */
per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
cpu_set(cpu, next->cpu_vm_mask);
- /* Re-load page tables */
- load_cr3(next->pgd);
+ /* Re-load page tables: load_cr3(next->pgd) */
+ per_cpu(cur_pgd, cpu) = next->pgd;
+ op->cmd = MMUEXT_NEW_BASEPTR;
+ op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
+ op++;
/*
* load the LDT, if the LDT is different:
*/
- if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context, cpu);
+ if (unlikely(prev->context.ldt != next->context.ldt)) {
+ /* load_LDT_nolock(&next->context, cpu) */
+ op->cmd = MMUEXT_SET_LDT;
+ op->linear_addr = (unsigned long)next->context.ldt;
+ op->nr_ents = next->context.size;
+ op++;
+ }
+
+ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
}
-#ifdef CONFIG_SMP
+#if 0 /* XEN: no lazy tlb */
else {
per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
@@ -66,9 +101,7 @@ static inline void switch_mm(struct mm_struct *prev,
#define deactivate_mm(tsk, mm) \
asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
-#define activate_mm(prev, next) do { \
- switch_mm((prev),(next),NULL); \
- flush_page_update_queue(); \
-} while (0)
+#define activate_mm(prev, next) \
+ switch_mm((prev),(next),NULL)
#endif
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/msr.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/msr.h
deleted file mode 100644
index 4cc4f318ba..0000000000
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/msr.h
+++ /dev/null
@@ -1,272 +0,0 @@
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-#include <linux/smp.h>
-#include <asm-xen/hypervisor.h>
-
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(_msr,_val1,_val2) do { \
- dom0_op_t op; \
- op.cmd = DOM0_MSR; \
- op.u.msr.write = 0; \
- op.u.msr.msr = (_msr); \
- op.u.msr.cpu_mask = (1 << smp_processor_id()); \
- HYPERVISOR_dom0_op(&op); \
- (_val1) = op.u.msr.out1; \
- (_val2) = op.u.msr.out2; \
-} while(0)
-
-#define wrmsr(_msr,_val1,_val2) do { \
- dom0_op_t op; \
- op.cmd = DOM0_MSR; \
- op.u.msr.write = 1; \
- op.u.msr.cpu_mask = (1 << smp_processor_id()); \
- op.u.msr.msr = (_msr); \
- op.u.msr.in1 = (_val1); \
- op.u.msr.in2 = (_val2); \
- HYPERVISOR_dom0_op(&op); \
-} while(0)
-
-#define rdmsrl(msr,val) do { \
- unsigned long l__,h__; \
- rdmsr (msr, l__, h__); \
- val = l__; \
- val |= ((u64)h__<<32); \
-} while(0)
-
-static inline void wrmsrl (unsigned long msr, unsigned long long val)
-{
- unsigned long lo, hi;
- lo = (unsigned long) val;
- hi = val >> 32;
- wrmsr (msr, lo, hi);
-}
-
-#define rdtsc(low,high) \
- __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
- __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscll(val) \
- __asm__ __volatile__("rdtsc" : "=A" (val))
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
- __asm__ __volatile__("rdpmc" \
- : "=a" (low), "=d" (high) \
- : "c" (counter))
-
-/* symbolic names for some interesting MSRs */
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR 0
-#define MSR_IA32_P5_MC_TYPE 1
-#define MSR_IA32_PLATFORM_ID 0x17
-#define MSR_IA32_EBL_CR_POWERON 0x2a
-
-#define MSR_IA32_APICBASE 0x1b
-#define MSR_IA32_APICBASE_BSP (1<<8)
-#define MSR_IA32_APICBASE_ENABLE (1<<11)
-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-
-#define MSR_IA32_UCODE_WRITE 0x79
-#define MSR_IA32_UCODE_REV 0x8b
-
-#define MSR_P6_PERFCTR0 0xc1
-#define MSR_P6_PERFCTR1 0xc2
-
-#define MSR_IA32_BBL_CR_CTL 0x119
-
-#define MSR_IA32_SYSENTER_CS 0x174
-#define MSR_IA32_SYSENTER_ESP 0x175
-#define MSR_IA32_SYSENTER_EIP 0x176
-
-#define MSR_IA32_MCG_CAP 0x179
-#define MSR_IA32_MCG_STATUS 0x17a
-#define MSR_IA32_MCG_CTL 0x17b
-
-/* P4/Xeon+ specific */
-#define MSR_IA32_MCG_EAX 0x180
-#define MSR_IA32_MCG_EBX 0x181
-#define MSR_IA32_MCG_ECX 0x182
-#define MSR_IA32_MCG_EDX 0x183
-#define MSR_IA32_MCG_ESI 0x184
-#define MSR_IA32_MCG_EDI 0x185
-#define MSR_IA32_MCG_EBP 0x186
-#define MSR_IA32_MCG_ESP 0x187
-#define MSR_IA32_MCG_EFLAGS 0x188
-#define MSR_IA32_MCG_EIP 0x189
-#define MSR_IA32_MCG_RESERVED 0x18A
-
-#define MSR_P6_EVNTSEL0 0x186
-#define MSR_P6_EVNTSEL1 0x187
-
-#define MSR_IA32_PERF_STATUS 0x198
-#define MSR_IA32_PERF_CTL 0x199
-
-#define MSR_IA32_THERM_CONTROL 0x19a
-#define MSR_IA32_THERM_INTERRUPT 0x19b
-#define MSR_IA32_THERM_STATUS 0x19c
-#define MSR_IA32_MISC_ENABLE 0x1a0
-
-#define MSR_IA32_DEBUGCTLMSR 0x1d9
-#define MSR_IA32_LASTBRANCHFROMIP 0x1db
-#define MSR_IA32_LASTBRANCHTOIP 0x1dc
-#define MSR_IA32_LASTINTFROMIP 0x1dd
-#define MSR_IA32_LASTINTTOIP 0x1de
-
-#define MSR_IA32_MC0_CTL 0x400
-#define MSR_IA32_MC0_STATUS 0x401
-#define MSR_IA32_MC0_ADDR 0x402
-#define MSR_IA32_MC0_MISC 0x403
-
-/* Pentium IV performance counter MSRs */
-#define MSR_P4_BPU_PERFCTR0 0x300
-#define MSR_P4_BPU_PERFCTR1 0x301
-#define MSR_P4_BPU_PERFCTR2 0x302
-#define MSR_P4_BPU_PERFCTR3 0x303
-#define MSR_P4_MS_PERFCTR0 0x304
-#define MSR_P4_MS_PERFCTR1 0x305
-#define MSR_P4_MS_PERFCTR2 0x306
-#define MSR_P4_MS_PERFCTR3 0x307
-#define MSR_P4_FLAME_PERFCTR0 0x308
-#define MSR_P4_FLAME_PERFCTR1 0x309
-#define MSR_P4_FLAME_PERFCTR2 0x30a
-#define MSR_P4_FLAME_PERFCTR3 0x30b
-#define MSR_P4_IQ_PERFCTR0 0x30c
-#define MSR_P4_IQ_PERFCTR1 0x30d
-#define MSR_P4_IQ_PERFCTR2 0x30e
-#define MSR_P4_IQ_PERFCTR3 0x30f
-#define MSR_P4_IQ_PERFCTR4 0x310
-#define MSR_P4_IQ_PERFCTR5 0x311
-#define MSR_P4_BPU_CCCR0 0x360
-#define MSR_P4_BPU_CCCR1 0x361
-#define MSR_P4_BPU_CCCR2 0x362
-#define MSR_P4_BPU_CCCR3 0x363
-#define MSR_P4_MS_CCCR0 0x364
-#define MSR_P4_MS_CCCR1 0x365
-#define MSR_P4_MS_CCCR2 0x366
-#define MSR_P4_MS_CCCR3 0x367
-#define MSR_P4_FLAME_CCCR0 0x368
-#define MSR_P4_FLAME_CCCR1 0x369
-#define MSR_P4_FLAME_CCCR2 0x36a
-#define MSR_P4_FLAME_CCCR3 0x36b
-#define MSR_P4_IQ_CCCR0 0x36c
-#define MSR_P4_IQ_CCCR1 0x36d
-#define MSR_P4_IQ_CCCR2 0x36e
-#define MSR_P4_IQ_CCCR3 0x36f
-#define MSR_P4_IQ_CCCR4 0x370
-#define MSR_P4_IQ_CCCR5 0x371
-#define MSR_P4_ALF_ESCR0 0x3ca
-#define MSR_P4_ALF_ESCR1 0x3cb
-#define MSR_P4_BPU_ESCR0 0x3b2
-#define MSR_P4_BPU_ESCR1 0x3b3
-#define MSR_P4_BSU_ESCR0 0x3a0
-#define MSR_P4_BSU_ESCR1 0x3a1
-#define MSR_P4_CRU_ESCR0 0x3b8
-#define MSR_P4_CRU_ESCR1 0x3b9
-#define MSR_P4_CRU_ESCR2 0x3cc
-#define MSR_P4_CRU_ESCR3 0x3cd
-#define MSR_P4_CRU_ESCR4 0x3e0
-#define MSR_P4_CRU_ESCR5 0x3e1
-#define MSR_P4_DAC_ESCR0 0x3a8
-#define MSR_P4_DAC_ESCR1 0x3a9
-#define MSR_P4_FIRM_ESCR0 0x3a4
-#define MSR_P4_FIRM_ESCR1 0x3a5
-#define MSR_P4_FLAME_ESCR0 0x3a6
-#define MSR_P4_FLAME_ESCR1 0x3a7
-#define MSR_P4_FSB_ESCR0 0x3a2
-#define MSR_P4_FSB_ESCR1 0x3a3
-#define MSR_P4_IQ_ESCR0 0x3ba
-#define MSR_P4_IQ_ESCR1 0x3bb
-#define MSR_P4_IS_ESCR0 0x3b4
-#define MSR_P4_IS_ESCR1 0x3b5
-#define MSR_P4_ITLB_ESCR0 0x3b6
-#define MSR_P4_ITLB_ESCR1 0x3b7
-#define MSR_P4_IX_ESCR0 0x3c8
-#define MSR_P4_IX_ESCR1 0x3c9
-#define MSR_P4_MOB_ESCR0 0x3aa
-#define MSR_P4_MOB_ESCR1 0x3ab
-#define MSR_P4_MS_ESCR0 0x3c0
-#define MSR_P4_MS_ESCR1 0x3c1
-#define MSR_P4_PMH_ESCR0 0x3ac
-#define MSR_P4_PMH_ESCR1 0x3ad
-#define MSR_P4_RAT_ESCR0 0x3bc
-#define MSR_P4_RAT_ESCR1 0x3bd
-#define MSR_P4_SAAT_ESCR0 0x3ae
-#define MSR_P4_SAAT_ESCR1 0x3af
-#define MSR_P4_SSU_ESCR0 0x3be
-#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
-#define MSR_P4_TBPU_ESCR0 0x3c2
-#define MSR_P4_TBPU_ESCR1 0x3c3
-#define MSR_P4_TC_ESCR0 0x3c4
-#define MSR_P4_TC_ESCR1 0x3c5
-#define MSR_P4_U2L_ESCR0 0x3b0
-#define MSR_P4_U2L_ESCR1 0x3b1
-
-/* AMD Defined MSRs */
-#define MSR_K6_EFER 0xC0000080
-#define MSR_K6_STAR 0xC0000081
-#define MSR_K6_WHCR 0xC0000082
-#define MSR_K6_UWCCR 0xC0000085
-#define MSR_K6_EPMR 0xC0000086
-#define MSR_K6_PSOR 0xC0000087
-#define MSR_K6_PFIR 0xC0000088
-
-#define MSR_K7_EVNTSEL0 0xC0010000
-#define MSR_K7_EVNTSEL1 0xC0010001
-#define MSR_K7_EVNTSEL2 0xC0010002
-#define MSR_K7_EVNTSEL3 0xC0010003
-#define MSR_K7_PERFCTR0 0xC0010004
-#define MSR_K7_PERFCTR1 0xC0010005
-#define MSR_K7_PERFCTR2 0xC0010006
-#define MSR_K7_PERFCTR3 0xC0010007
-#define MSR_K7_HWCR 0xC0010015
-#define MSR_K7_CLK_CTL 0xC001001b
-#define MSR_K7_FID_VID_CTL 0xC0010041
-#define MSR_K7_FID_VID_STATUS 0xC0010042
-
-/* extended feature register */
-#define MSR_EFER 0xc0000080
-
-/* EFER bits: */
-
-/* Execute Disable enable */
-#define _EFER_NX 11
-#define EFER_NX (1<<_EFER_NX)
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1 0x107
-#define MSR_IDT_FCR2 0x108
-#define MSR_IDT_FCR3 0x109
-#define MSR_IDT_FCR4 0x10a
-
-#define MSR_IDT_MCR0 0x110
-#define MSR_IDT_MCR1 0x111
-#define MSR_IDT_MCR2 0x112
-#define MSR_IDT_MCR3 0x113
-#define MSR_IDT_MCR4 0x114
-#define MSR_IDT_MCR5 0x115
-#define MSR_IDT_MCR6 0x116
-#define MSR_IDT_MCR7 0x117
-#define MSR_IDT_MCR_CTRL 0x120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR 0x1107
-#define MSR_VIA_LONGHAUL 0x110a
-#define MSR_VIA_RNG 0x110b
-#define MSR_VIA_BCR2 0x1147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL 0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
-#define MSR_TMTA_LRTI_READOUT 0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
-
-#endif /* __ASM_MSR_H */
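
The deleted header routed privileged MSR access through a DOM0_MSR dom0_op hypercall while leaving unprivileged instructions such as rdtsc inline. One detail worth keeping in mind when reading callers: rdmsr architecturally returns a 64-bit value split across EDX:EAX, and rdmsrl reassembled the halves. A self-contained, user-space-runnable illustration of that composition (the half values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* rdmsr yields EDX:EAX; pretend these are the two halves read. */
        uint32_t lo = 0x89abcdefu, hi = 0x01234567u;

        /* Same composition as the deleted rdmsrl() macro. */
        uint64_t val = ((uint64_t)hi << 32) | lo;

        printf("%016llx\n", (unsigned long long)val);  /* 0123456789abcdef */
        return 0;
    }
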
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgalloc.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
index 520e98d072..404da2640b 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
@@ -11,12 +11,23 @@
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
-#define pmd_populate(mm, pmd, pte) do { \
- set_pmd(pmd, __pmd(_PAGE_TABLE + \
- ((unsigned long long)page_to_pfn(pte) << \
- (unsigned long long) PAGE_SHIFT))); \
- flush_page_update_queue(); \
+#define pmd_populate(mm, pmd, pte) \
+do { \
+ if (unlikely((mm)->context.pinned)) { \
+ if (!PageHighMem(pte)) \
+ HYPERVISOR_update_va_mapping( \
+ (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT),\
+ pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0);\
+ set_pmd(pmd, __pmd(_PAGE_TABLE + \
+ ((unsigned long long)page_to_pfn(pte) << \
+ (unsigned long long) PAGE_SHIFT))); \
+ } else { \
+ *(pmd) = __pmd(_PAGE_TABLE + \
+ ((unsigned long long)page_to_pfn(pte) << \
+ (unsigned long long) PAGE_SHIFT)); \
+ } \
} while (0)
+
/*
* Allocate and free page tables.
*/
@@ -30,7 +41,6 @@ static inline void pte_free_kernel(pte_t *pte)
{
free_page((unsigned long)pte);
make_page_writable(pte);
- flush_page_update_queue();
}
extern void pte_free(struct page *pte);
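
The new pmd_populate() above encodes a Xen invariant: once an mm's page tables are pinned, every page reachable as a page table must be mapped read-only, so a freshly allocated pte page is downgraded to PAGE_KERNEL_RO (via HYPERVISOR_update_va_mapping) before being hooked into the pmd, and the pmd write itself goes through set_pmd; an unpinned mm can still store the entry directly. A restatement of the macro as a function, to make the two paths easier to follow (hypothetical form, kernel context assumed):

    /* Hypothetical function form of the pmd_populate() macro above. */
    static void pmd_populate_fn(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
    {
        unsigned long pfn = page_to_pfn(pte);
        pmd_t entry = __pmd(_PAGE_TABLE + ((unsigned long long)pfn << PAGE_SHIFT));

        if (unlikely(mm->context.pinned)) {
            /* Pinned tables are validated by Xen: the pte page must lose
             * its writable kernel mapping before it becomes reachable. */
            if (!PageHighMem(pte))
                HYPERVISOR_update_va_mapping(
                        (unsigned long)__va(pfn << PAGE_SHIFT),
                        pfn_pte(pfn, PAGE_KERNEL_RO), 0);
            set_pmd(pmd, entry);     /* goes via the hypervisor */
        } else {
            *pmd = entry;            /* plain store; tables not yet pinned */
        }
    }
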
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
index 5e070af2ab..b25099b6fc 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
@@ -13,29 +13,16 @@
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
-#define set_pte_batched(pteptr, pteval) \
- queue_l1_entry_update(pteptr, (pteval).pte_low)
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
-#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
-/*
- * A note on implementation of this atomic 'get-and-clear' operation.
- * This is actually very simple because Xen Linux can only run on a single
- * processor. Therefore, we cannot race other processors setting the 'accessed'
- * or 'dirty' bits on a page-table entry.
- * Even if pages are shared between domains, that is not a problem because
- * each domain will have separate page tables, with their own versions of
- * accessed & dirty state.
- */
-static inline pte_t ptep_get_and_clear(pte_t *xp)
-{
- pte_t pte = *xp;
- if (pte.pte_low)
- set_pte(xp, __pte_ma(0));
- return pte;
-}
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
+#else
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
+#define ptep_get_and_clear(xp) __pte_ma(xchg(&(xp)->pte_low, 0))
#define pte_same(a, b) ((a).pte_low == (b).pte_low)
/*
* We detect special mappings in one of two ways:
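
The old open-coded ptep_get_and_clear() (and its long comment, removed above) relied on Xen Linux being uniprocessor: read the pte, then clear it, with nothing racing to set the accessed/dirty bits in between. The replacement uses xchg so the read-and-clear is one atomic step. A user-space-runnable model of why the exchange matters (__atomic_exchange_n stands in for the x86 xchg instruction; the pte value is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Stand-in pte: frame 2, flags present|rw|user|accessed|dirty. */
        uint32_t pte = 0x2067;

        /* One atomic step, like "xchg": read the old entry and clear it,
         * so a concurrent set of the dirty/accessed bit cannot be lost
         * between a separate read and write. */
        uint32_t old = __atomic_exchange_n(&pte, 0, __ATOMIC_SEQ_CST);

        printf("old=%#x now=%#x\n", old, pte);   /* old=0x2067 now=0 */
        return 0;
    }
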
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h
index e008baf6fb..f611f04781 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h
@@ -35,12 +35,9 @@ extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];
extern kmem_cache_t *pgd_cache;
extern kmem_cache_t *pmd_cache;
-extern kmem_cache_t *pte_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
-void pte_ctor(void *, kmem_cache_t *, unsigned long);
-void pte_dtor(void *, kmem_cache_t *, unsigned long);
void pmd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_dtor(void *, kmem_cache_t *, unsigned long);
@@ -89,9 +86,6 @@ void paging_init(void);
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
-extern void *high_memory;
-extern unsigned long vmalloc_earlyreserve;
-
/*
* The 4MB page is guessing.. Detailed in the infamous "Chapter H"
* of the Pentium details, but assuming intel did the straightforward
@@ -214,7 +208,7 @@ extern unsigned long pg0[];
/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
can temporarily clear it. */
#define pmd_present(x) (pmd_val(x))
-/* pmd_clear below */
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
@@ -254,34 +248,28 @@ static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return p
static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
- pte_t pte = *ptep;
- int ret = pte_dirty(pte);
- if (ret)
- xen_l1_entry_update(ptep, pte_mkclean(pte).pte_low);
- return ret;
+ if (!pte_dirty(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
}
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
- pte_t pte = *ptep;
- int ret = pte_young(pte);
- if (ret)
- xen_l1_entry_update(ptep, pte_mkold(pte).pte_low);
- return ret;
+ if (!pte_young(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
}
static inline void ptep_set_wrprotect(pte_t *ptep)
{
- pte_t pte = *ptep;
- if (pte_write(pte))
- set_pte(ptep, pte_wrprotect(pte));
+ if (pte_write(*ptep))
+ clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
}
static inline void ptep_mkdirty(pte_t *ptep)
{
- pte_t pte = *ptep;
- if (!pte_dirty(pte))
- xen_l1_entry_update(ptep, pte_mkdirty(pte).pte_low);
+ if (!pte_dirty(*ptep))
+ set_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
}
/*
@@ -317,11 +305,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define page_pte(page) page_pte_prot(page, __pgprot(0))
-#define pmd_clear(xp) do { \
- set_pmd(xp, __pmd(0)); \
- xen_flush_page_update_queue(); \
-} while (0)
-
#define pmd_large(pmd) \
((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
@@ -421,8 +404,7 @@ extern void noexec_setup(const char *str);
do { \
if (__dirty) { \
if ( likely((__vma)->vm_mm == current->mm) ) { \
- xen_flush_page_update_queue(); \
- HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, (__entry), UVMF_INVLPG); \
+ HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits)); \
} else { \
xen_l1_entry_update((__ptep), (__entry).pte_low); \
flush_tlb_page((__vma), (__address)); \
@@ -440,21 +422,28 @@ do { \
#define ptep_establish_new(__vma, __address, __ptep, __entry) \
do { \
if (likely((__vma)->vm_mm == current->mm)) { \
- xen_flush_page_update_queue(); \
- HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, \
+ HYPERVISOR_update_va_mapping((__address), \
__entry, 0); \
} else { \
xen_l1_entry_update((__ptep), (__entry).pte_low); \
} \
} while (0)
-/* NOTE: make_page* callers must call flush_page_update_queue() */
+#ifndef CONFIG_XEN_SHADOW_MODE
void make_lowmem_page_readonly(void *va);
void make_lowmem_page_writable(void *va);
void make_page_readonly(void *va);
void make_page_writable(void *va);
void make_pages_readonly(void *va, unsigned int nr);
void make_pages_writable(void *va, unsigned int nr);
+#else
+#define make_lowmem_page_readonly(_va) ((void)0)
+#define make_lowmem_page_writable(_va) ((void)0)
+#define make_page_readonly(_va) ((void)0)
+#define make_page_writable(_va) ((void)0)
+#define make_pages_readonly(_va, _nr) ((void)0)
+#define make_pages_writable(_va, _nr) ((void)0)
+#endif
#define virt_to_ptep(__va) \
({ \
@@ -477,7 +466,6 @@ void make_pages_writable(void *va, unsigned int nr);
#define kern_addr_valid(addr) (1)
#endif /* !CONFIG_DISCONTIGMEM */
-#define DOMID_LOCAL (0xFFFFU)
int direct_remap_area_pages(struct mm_struct *mm,
unsigned long address,
unsigned long machine_addr,
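
The pgtable.h hunks above follow the same theme: with writable page tables the guest may flip pte bits itself, so harvesting the accessed/dirty bits becomes a cheap unlocked pre-check followed by a locked test_and_clear_bit, instead of a xen_l1_entry_update hypercall per pte. A user-space-runnable sketch of that check-then-atomic pattern (bit 6 is the i386 dirty bit; the helper name is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PTE_DIRTY (1u << 6)   /* _PAGE_BIT_DIRTY == 6 on i386 */

    /* Unlocked fast path first, locked read-modify-write only if needed:
     * the shape of ptep_test_and_clear_dirty() above. */
    static bool test_and_clear_dirty(uint32_t *pte)
    {
        if (!(*pte & PTE_DIRTY))          /* common clean case: no lock */
            return false;
        return __atomic_fetch_and(pte, ~PTE_DIRTY, __ATOMIC_SEQ_CST) & PTE_DIRTY;
    }

    int main(void)
    {
        uint32_t pte = 0x2067;            /* dirty bit set */
        printf("%d %d\n", test_and_clear_dirty(&pte),
                          test_and_clear_dirty(&pte));   /* prints: 1 0 */
        return 0;
    }
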
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h
index a6a5a9cf83..fd54b409e2 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h
@@ -88,7 +88,7 @@ extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);
-extern pgd_t *cur_pgd; /* XXXsmp */
+DECLARE_PER_CPU(pgd_t *, cur_pgd);
#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
@@ -193,8 +193,8 @@ static inline unsigned int cpuid_edx(unsigned int op)
}
#define load_cr3(pgdir) do { \
- queue_pt_switch(__pa(pgdir)); \
- cur_pgd = pgdir; /* XXXsmp */ \
+ xen_pt_switch(__pa(pgdir)); \
+ per_cpu(cur_pgd, smp_processor_id()) = pgdir; \
} while (/* CONSTCOND */0)
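
load_cr3() now records which pgd each CPU has loaded in a per-CPU variable, retiring the single global marked "XXXsmp", since each CPU can run a different mm. A one-line sketch of the access pattern; the accessor is hypothetical and assumes kernel context:

    /* Hypothetical accessor for the per-CPU base-pointer tracking above. */
    static inline pgd_t *this_cpu_pgd(void)
    {
        return per_cpu(cur_pgd, smp_processor_id());
    }
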
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/segment.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/segment.h
index 288243f05a..5496d69023 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/segment.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/segment.h
@@ -74,17 +74,17 @@
#define GDT_ENTRY_DOUBLEFAULT_TSS 31
/*
- * The GDT has LAST_RESERVED_GDT_ENTRY + 1 entries
+ * The GDT has 32 entries
*/
-#define GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY + 1)
+#define GDT_ENTRIES 32
#define GDT_SIZE (GDT_ENTRIES * 8)
/* Simple and small GDT entries for booting only */
-#define __BOOT_CS FLAT_GUESTOS_CS
+#define __BOOT_CS FLAT_KERNEL_CS
-#define __BOOT_DS FLAT_GUESTOS_DS
+#define __BOOT_DS FLAT_KERNEL_DS
/*
* The interrupt descriptor table has room for 256 idt's,
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/spinlock.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/spinlock.h
new file mode 100644
index 0000000000..d7189a7c28
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/spinlock.h
@@ -0,0 +1,250 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
+#include <linux/config.h>
+#include <linux/compiler.h>
+
+asmlinkage int printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+ volatile unsigned int slock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned magic;
+#endif
+#ifdef CONFIG_PREEMPT
+ unsigned int break_lock;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC
+#else
+#define SPINLOCK_MAGIC_INIT /* */
+#endif
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
+
+#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
+#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
+
+#define spin_lock_string \
+ "\n1:\t" \
+ "lock ; decb %0\n\t" \
+ "jns 3f\n" \
+ "2:\t" \
+ "rep;nop\n\t" \
+ "cmpb $0,%0\n\t" \
+ "jle 2b\n\t" \
+ "jmp 1b\n" \
+ "3:\n\t"
+
+#define spin_lock_string_flags \
+ "\n1:\t" \
+ "lock ; decb %0\n\t" \
+ "jns 4f\n\t" \
+ "2:\t" \
+ "testl $0x200, %1\n\t" \
+ "jz 3f\n\t" \
+ "#sti\n\t" \
+ "3:\t" \
+ "rep;nop\n\t" \
+ "cmpb $0, %0\n\t" \
+ "jle 3b\n\t" \
+ "#cli\n\t" \
+ "jmp 1b\n" \
+ "4:\n\t"
+
+/*
+ * This works. Despite all the confusion.
+ * (except on PPro SMP or if we are using OOSTORE)
+ * (PPro errata 66, 92)
+ */
+
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+#define spin_unlock_string \
+ "movb $1,%0" \
+ :"=m" (lock->slock) : : "memory"
+
+
+static inline void _raw_spin_unlock(spinlock_t *lock)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ BUG_ON(lock->magic != SPINLOCK_MAGIC);
+ BUG_ON(!spin_is_locked(lock));
+#endif
+ __asm__ __volatile__(
+ spin_unlock_string
+ );
+}
+
+#else
+
+#define spin_unlock_string \
+ "xchgb %b0, %1" \
+ :"=q" (oldval), "=m" (lock->slock) \
+ :"0" (oldval) : "memory"
+
+static inline void _raw_spin_unlock(spinlock_t *lock)
+{
+ char oldval = 1;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ BUG_ON(lock->magic != SPINLOCK_MAGIC);
+ BUG_ON(!spin_is_locked(lock));
+#endif
+ __asm__ __volatile__(
+ spin_unlock_string
+ );
+}
+
+#endif
+
+static inline int _raw_spin_trylock(spinlock_t *lock)
+{
+ char oldval;
+ __asm__ __volatile__(
+ "xchgb %b0,%1"
+ :"=q" (oldval), "=m" (lock->slock)
+ :"0" (0) : "memory");
+ return oldval > 0;
+}
+
+static inline void _raw_spin_lock(spinlock_t *lock)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
+ printk("eip: %p\n", __builtin_return_address(0));
+ BUG();
+ }
+#endif
+ __asm__ __volatile__(
+ spin_lock_string
+ :"=m" (lock->slock) : : "memory");
+}
+
+static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
+ printk("eip: %p\n", __builtin_return_address(0));
+ BUG();
+ }
+#endif
+ __asm__ __volatile__(
+ spin_lock_string_flags
+ :"=m" (lock->slock) : "r" (flags) : "memory");
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+typedef struct {
+ volatile unsigned int lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned magic;
+#endif
+#ifdef CONFIG_PREEMPT
+ unsigned int break_lock;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC
+#else
+#define RWLOCK_MAGIC_INIT /* */
+#endif
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
+
+#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores. See
+ * semaphore.h for details. -ben
+ */
+/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
+
+static inline void _raw_read_lock(rwlock_t *rw)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ BUG_ON(rw->magic != RWLOCK_MAGIC);
+#endif
+ __build_read_lock(rw, "__read_lock_failed");
+}
+
+static inline void _raw_write_lock(rwlock_t *rw)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ BUG_ON(rw->magic != RWLOCK_MAGIC);
+#endif
+ __build_write_lock(rw, "__write_lock_failed");
+}
+
+#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
+#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+
+static inline int _raw_read_trylock(rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ atomic_dec(count);
+ if (atomic_read(count) >= 0)
+ return 1;
+ atomic_inc(count);
+ return 0;
+}
+
+static inline int _raw_write_trylock(rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+ return 1;
+ atomic_add(RW_LOCK_BIAS, count);
+ return 0;
+}
+
+#endif /* __ASM_SPINLOCK_H */
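
The lock protocol in spin_lock_string above is a signed byte initialized to 1: "lock; decb" atomically decrements it, and the CPU that drives it to 0 (sign flag clear, hence "jns") owns the lock; contenders push it negative and spin read-only with "rep;nop" until unlock stores 1 again. A user-space-runnable model using GCC atomics (__builtin_ia32_pause assumes an x86 compiler; drop it elsewhere):

    #include <stdio.h>

    /* Model of the byte lock: 1 = free, 0 = held, negative = contended. */
    static volatile signed char slock = 1;

    static void lock(void)
    {
        for (;;) {
            /* "lock; decb" + "jns": take the lock if the result is >= 0. */
            if (__atomic_sub_fetch(&slock, 1, __ATOMIC_ACQUIRE) >= 0)
                return;
            while (slock <= 0)          /* spin read-only, as the "rep;nop" loop */
                __builtin_ia32_pause();
        }
    }

    static void unlock(void)
    {
        /* "movb $1,%0": release by restoring the free value. */
        __atomic_store_n(&slock, 1, __ATOMIC_RELEASE);
    }

    int main(void)
    {
        lock();
        puts("lock held");
        unlock();
        return 0;
    }
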
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h
index ed4f1b5673..7fe4f04d1f 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h
@@ -8,7 +8,6 @@
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm-xen/hypervisor.h>
-#include <asm-xen/evtchn.h>
#ifdef __KERNEL__
@@ -107,28 +106,21 @@ static inline unsigned long _get_base(char * addr)
/*
* Clear and set 'TS' bit respectively
*/
-/* NB. 'clts' is done for us by Xen during virtual trap. */
-#define clts() ((void)0)
+#define clts() (HYPERVISOR_fpu_taskswitch(0))
#define read_cr0() \
BUG();
#define write_cr0(x) \
BUG();
-
#define read_cr4() \
BUG();
#define write_cr4(x) \
BUG();
-#define stts() (HYPERVISOR_fpu_taskswitch())
+#define stts() (HYPERVISOR_fpu_taskswitch(1))
#endif /* __KERNEL__ */
-static inline void wbinvd(void)
-{
- mmu_update_t u;
- u.ptr = MMU_EXTENDED_COMMAND;
- u.val = MMUEXT_FLUSH_CACHE;
- (void)HYPERVISOR_mmu_update(&u, 1, NULL);
-}
+#define wbinvd() \
+ __asm__ __volatile__ ("wbinvd": : :"memory");
static inline unsigned long get_limit(unsigned long segment)
{
@@ -451,63 +443,70 @@ struct alt_instr {
#define __cli() \
do { \
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
+ vcpu_info_t *_vcpu; \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu->evtchn_upcall_mask = 1; \
+ preempt_enable_no_resched(); \
barrier(); \
} while (0)
#define __sti() \
do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
+ vcpu_info_t *_vcpu; \
barrier(); \
- _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu->evtchn_upcall_mask = 0; \
barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+ preempt_enable(); \
} while (0)
#define __save_flags(x) \
do { \
- (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
+ vcpu_info_t *_vcpu; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ (x) = _vcpu->evtchn_upcall_mask; \
} while (0)
#define __restore_flags(x) \
do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
+ vcpu_info_t *_vcpu; \
barrier(); \
- if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) { \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
- } \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+ preempt_enable(); \
+ } else \
+ preempt_enable_no_resched(); \
} while (0)
-#define safe_halt() ((void)0)
+#define safe_halt() ((void)0)
#define __save_and_cli(x) \
do { \
- (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
+ vcpu_info_t *_vcpu; \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ (x) = _vcpu->evtchn_upcall_mask; \
+ _vcpu->evtchn_upcall_mask = 1; \
+ preempt_enable_no_resched(); \
barrier(); \
} while (0)
-#define __save_and_sti(x) \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
- (x) = _shared->vcpu_data[0].evtchn_upcall_mask; \
- _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
-} while (0)
-
#define local_irq_save(x) __save_and_cli(x)
#define local_irq_restore(x) __restore_flags(x)
#define local_save_flags(x) __save_flags(x)
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
-#define irqs_disabled() HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
+#define irqs_disabled() \
+ HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask
/*
* disable hlt during certain critical i/o operations
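
Under Xen, "interrupts off" means setting this vcpu's evtchn_upcall_mask byte in the shared-info page. The rework above adds preempt_disable() around each access so the task cannot migrate between smp_processor_id() and the write, and __sti unmasks first and only then re-checks evtchn_upcall_pending, so an event that arrived while masked is still delivered. A user-space-runnable model of that unmask-then-recheck handshake:

    #include <stdbool.h>
    #include <stdio.h>

    /* User-space model of the mask/pending handshake in __sti above. */
    static volatile bool mask = true, pending;

    static void deliver(void) { pending = false; puts("event delivered"); }

    static void virq_enable(void)
    {
        mask = false;                            /* unmask first...           */
        __atomic_thread_fence(__ATOMIC_SEQ_CST); /* ...make it visible...     */
        if (pending)                             /* ...then re-check: events  */
            deliver();                           /* posted while masked are   */
    }                                            /* not lost                  */

    int main(void)
    {
        pending = true;       /* an event arrived while we were masked */
        virq_enable();        /* prints "event delivered"              */
        return 0;
    }
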
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/tlbflush.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
index 4d93bb317a..4d13a650a2 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
@@ -7,17 +7,10 @@
#define __flush_tlb() xen_tlb_flush()
#define __flush_tlb_global() xen_tlb_flush()
+#define __flush_tlb_all() xen_tlb_flush()
extern unsigned long pgkern_mask;
-# define __flush_tlb_all() \
- do { \
- if (cpu_has_pge) \
- __flush_tlb_global(); \
- else \
- __flush_tlb(); \
- } while (0)
-
#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
#define __flush_tlb_single(addr) xen_invlpg(addr)
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/xor.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/xor.h
deleted file mode 100644
index 79a45debc6..0000000000
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/xor.h
+++ /dev/null
@@ -1,884 +0,0 @@
-/*
- * include/asm-i386/xor.h
- *
- * Optimized RAID-5 checksumming functions for MMX and SSE.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * High-speed RAID5 checksumming functions utilizing MMX instructions.
- * Copyright (C) 1998 Ingo Molnar.
- */
-
-#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
-#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
-#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
-#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
-#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
-#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
-
-#include <asm/i387.h>
-
-static void
-xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 7;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- ST(i,0) \
- XO1(i+1,1) \
- ST(i+1,1) \
- XO1(i+2,2) \
- ST(i+2,2) \
- XO1(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-static void
-xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 7;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- ST(i,0) \
- XO2(i+1,1) \
- ST(i+1,1) \
- XO2(i+2,2) \
- ST(i+2,2) \
- XO2(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-static void
-xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 7;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- ST(i,0) \
- XO3(i+1,1) \
- ST(i+1,1) \
- XO3(i+2,2) \
- ST(i+2,2) \
- XO3(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " addl $128, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-
-static void
-xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 7;
-
- kernel_fpu_begin();
-
- /* Make sure GCC forgets anything it knows about p4 or p5,
- such that it won't pass to the asm volatile below a
- register that is shared with any other variable. That's
- because we modify p4 and p5 there, but we can't mark them
- as read/write, otherwise we'd overflow the 10-asm-operands
- limit of GCC < 3.1. */
- __asm__ ("" : "+r" (p4), "+r" (p5));
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- ST(i,0) \
- XO4(i+1,1) \
- ST(i+1,1) \
- XO4(i+2,2) \
- ST(i+2,2) \
- XO4(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " addl $128, %4 ;\n"
- " addl $128, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- /* p4 and p5 were modified, and now the variables are dead.
- Clobber them just to be sure nobody does something stupid
- like assuming they have some legal value. */
- __asm__ ("" : "=r" (p4), "=r" (p5));
-
- kernel_fpu_end();
-}
-
-#undef LD
-#undef XO1
-#undef XO2
-#undef XO3
-#undef XO4
-#undef ST
-#undef BLOCK
-
-static void
-xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 6;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- " .align 32 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-static void
-xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 6;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- :
- : "memory" );
-
- kernel_fpu_end();
-}
-
-static void
-xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 6;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor (%4), %%mm0 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " pxor 8(%4), %%mm1 ;\n"
- " movq %%mm0, (%1) ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " pxor 16(%4), %%mm2 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 24(%4), %%mm3 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " pxor 32(%4), %%mm4 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " pxor 40(%4), %%mm5 ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%4), %%mm6 ;\n"
- " pxor 56(%4), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " addl $64, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-static void
-xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 6;
-
- kernel_fpu_begin();
-
- /* Make sure GCC forgets anything it knows about p4 or p5,
- such that it won't pass to the asm volatile below a
- register that is shared with any other variable. That's
- because we modify p4 and p5 there, but we can't mark them
- as read/write, otherwise we'd overflow the 10-asm-operands
- limit of GCC < 3.1. */
- __asm__ ("" : "+r" (p4), "+r" (p5));
-
- __asm__ __volatile__ (
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " pxor (%4), %%mm0 ;\n"
- " pxor 8(%4), %%mm1 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " pxor (%5), %%mm0 ;\n"
- " pxor 8(%5), %%mm1 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 16(%4), %%mm2 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " pxor 16(%5), %%mm2 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 24(%4), %%mm3 ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 24(%5), %%mm3 ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%4), %%mm4 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " pxor 32(%5), %%mm4 ;\n"
- " pxor 40(%4), %%mm5 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " pxor 40(%5), %%mm5 ;\n"
- " pxor 48(%4), %%mm6 ;\n"
- " pxor 56(%4), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%5), %%mm6 ;\n"
- " pxor 56(%5), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " addl $64, %4 ;\n"
- " addl $64, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- /* p4 and p5 were modified, and now the variables are dead.
- Clobber them just to be sure nobody does something stupid
- like assuming they have some legal value. */
- __asm__ ("" : "=r" (p4), "=r" (p5));
-
- kernel_fpu_end();
-}
-
-static struct xor_block_template xor_block_pII_mmx = {
- .name = "pII_mmx",
- .do_2 = xor_pII_mmx_2,
- .do_3 = xor_pII_mmx_3,
- .do_4 = xor_pII_mmx_4,
- .do_5 = xor_pII_mmx_5,
-};
-
-static struct xor_block_template xor_block_p5_mmx = {
- .name = "p5_mmx",
- .do_2 = xor_p5_mmx_2,
- .do_3 = xor_p5_mmx_3,
- .do_4 = xor_p5_mmx_4,
- .do_5 = xor_p5_mmx_5,
-};
-
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-#define XMMS_SAVE do { \
- preempt_disable(); \
- if (!(current_thread_info()->status & TS_USEDFPU)) \
- clts(); \
- __asm__ __volatile__ ( \
- "movups %%xmm0,(%1) ;\n\t" \
- "movups %%xmm1,0x10(%1) ;\n\t" \
- "movups %%xmm2,0x20(%1) ;\n\t" \
- "movups %%xmm3,0x30(%1) ;\n\t" \
- : "=&r" (cr0) \
- : "r" (xmm_save) \
- : "memory"); \
-} while(0)
-
-#define XMMS_RESTORE do { \
- __asm__ __volatile__ ( \
- "sfence ;\n\t" \
- "movups (%1),%%xmm0 ;\n\t" \
- "movups 0x10(%1),%%xmm1 ;\n\t" \
- "movups 0x20(%1),%%xmm2 ;\n\t" \
- "movups 0x30(%1),%%xmm3 ;\n\t" \
- : \
- : "r" (cr0), "r" (xmm_save) \
- : "memory"); \
- if (!(current_thread_info()->status & TS_USEDFPU)) \
- stts(); \
- preempt_enable(); \
-} while(0)
-
-#define ALIGN16 __attribute__((aligned(16)))
-
-#define OFFS(x) "16*("#x")"
-#define PF_OFFS(x) "256+16*("#x")"
-#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
-#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
-#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
-#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
-#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
-#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
-#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
-#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
-#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
-#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
-#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
-#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
-#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- PF1(i) \
- PF1(i+2) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r"(p2), "+r"(p3)
- :
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- PF3(i) \
- PF3(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- /* Make sure GCC forgets anything it knows about p4 or p5,
- such that it won't pass to the asm volatile below a
- register that is shared with any other variable. That's
- because we modify p4 and p5 there, but we can't mark them
- as read/write, otherwise we'd overflow the 10-asm-operands
- limit of GCC < 3.1. */
- __asm__ ("" : "+r" (p4), "+r" (p5));
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- PF3(i) \
- PF3(i+2) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- PF4(i) \
- PF4(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- XO4(i+1,1) \
- XO4(i+2,2) \
- XO4(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " addl $256, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- /* p4 and p5 were modified, and now the variables are dead.
- Clobber them just to be sure nobody does something stupid
- like assuming they have some legal value. */
- __asm__ ("" : "=r" (p4), "=r" (p5));
-
- XMMS_RESTORE;
-}
-
-static struct xor_block_template xor_block_pIII_sse = {
- .name = "pIII_sse",
- .do_2 = xor_sse_2,
- .do_3 = xor_sse_3,
- .do_4 = xor_sse_4,
- .do_5 = xor_sse_5,
-};
-
-/* Also try the generic routines. */
-#include <asm-generic/xor.h>
-
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES \
- do { \
- xor_speed(&xor_block_8regs); \
- xor_speed(&xor_block_8regs_p); \
- xor_speed(&xor_block_32regs); \
- xor_speed(&xor_block_32regs_p); \
- if (cpu_has_xmm) \
- xor_speed(&xor_block_pIII_sse); \
- if (cpu_has_mmx) { \
- xor_speed(&xor_block_pII_mmx); \
- xor_speed(&xor_block_p5_mmx); \
- } \
- } while (0)
-
-/* We force the use of the SSE xor block because it can write around L2.
- We may also be able to load into the L1 only depending on how the cpu
- deals with a load to a line that is being prefetched. */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
- (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
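
With this sparse copy deleted, builds presumably fall back to the stock include/asm-i386/xor.h; the SSE paths there bracket FPU use with clts()/stts(), which the system.h change earlier in this patch turns into real HYPERVISOR_fpu_taskswitch calls rather than Xen-specific stubs. For reference, the core operation every deleted routine accelerates is just a wide in-place XOR; a portable, user-space-runnable version:

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal portable version of the operation the deleted MMX/SSE
     * routines accelerate: dst ^= src over a block, 8 bytes at a time. */
    static void xor_block_2(unsigned long bytes, uint64_t *p1, const uint64_t *p2)
    {
        unsigned long words = bytes / sizeof(uint64_t);
        while (words--)
            *p1++ ^= *p2++;
    }

    int main(void)
    {
        uint64_t a[4] = { 1, 2, 3, 4 }, b[4] = { 4, 3, 2, 1 };
        xor_block_2(sizeof(a), a, b);
        printf("%llu %llu %llu %llu\n",   /* prints: 5 1 1 5 */
               (unsigned long long)a[0], (unsigned long long)a[1],
               (unsigned long long)a[2], (unsigned long long)a[3]);
        return 0;
    }
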
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/arch_hooks.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/arch_hooks.h
new file mode 100644
index 0000000000..28b96a6fb9
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/arch_hooks.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_ARCH_HOOKS_H
+#define _ASM_ARCH_HOOKS_H
+
+#include <linux/interrupt.h>
+
+/*
+ * linux/include/asm/arch_hooks.h
+ *
+ * define the architecture specific hooks
+ */
+
+/* these aren't arch hooks, they are generic routines
+ * that can be used by the hooks */
+extern void init_ISA_irqs(void);
+extern void apic_intr_init(void);
+extern void smp_intr_init(void);
+extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/* these are the defined hooks */
+extern void intr_init_hook(void);
+extern void pre_intr_init_hook(void);
+extern void pre_setup_arch_hook(void);
+extern void trap_init_hook(void);
+extern void time_init_hook(void);
+extern void mca_nmi_hook(void);
+
+#endif
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/bootsetup.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/bootsetup.h
new file mode 100644
index 0000000000..731d8678ab
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/bootsetup.h
@@ -0,0 +1,41 @@
+
+#ifndef _X86_64_BOOTSETUP_H
+#define _X86_64_BOOTSETUP_H 1
+
+extern char x86_boot_params[2048];
+
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+#define PARAM ((unsigned char *)x86_boot_params)
+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
+#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
+#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
+#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
+#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
+
+#define INITRD_START (__pa(xen_start_info.mod_start))
+#define INITRD_SIZE (xen_start_info.mod_len)
+#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
+
+#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
+#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
+#define COMMAND_LINE saved_command_line
+
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+#endif
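
x86_boot_params is a flat 2048-byte buffer, and each macro above is a typed view at a fixed offset, mirroring the native boot-parameter ("zero page") layout; only the initrd fields are redirected to xen_start_info. A user-space-runnable illustration of the overlay technique (offsets copied from the header; the values stored are made up):

    #include <stdio.h>

    /* Typed views at fixed offsets into a flat byte buffer. */
    static unsigned char params[2048];

    #define PARAM           (params)
    #define EXT_MEM_K       (*(unsigned short *)(PARAM + 2))
    #define ORIG_ROOT_DEV   (*(unsigned short *)(PARAM + 0x1FC))

    int main(void)
    {
        EXT_MEM_K = 65535;          /* writes bytes 2..3 of the buffer */
        ORIG_ROOT_DEV = 0x0801;     /* a root device number, for show  */
        printf("%u %#x\n", EXT_MEM_K, ORIG_ROOT_DEV);
        return 0;
    }
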
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/desc.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/desc.h
new file mode 100644
index 0000000000..15a10a135d
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/desc.h
@@ -0,0 +1,240 @@
+/* Written 2000 by Andi Kleen */
+#ifndef __ARCH_DESC_H
+#define __ARCH_DESC_H
+
+#include <linux/threads.h>
+#include <asm/ldt.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/string.h>
+#include <asm/segment.h>
+#include <asm/mmu.h>
+
+// 8 byte segment descriptor
+struct desc_struct {
+ u16 limit0;
+ u16 base0;
+ unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
+ unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
+} __attribute__((packed));
+
+struct n_desc_struct {
+ unsigned int a,b;
+};
+
+enum {
+ GATE_INTERRUPT = 0xE,
+ GATE_TRAP = 0xF,
+ GATE_CALL = 0xC,
+};
+
+// 16byte gate
+struct gate_struct {
+ u16 offset_low;
+ u16 segment;
+ unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
+ u16 offset_middle;
+ u32 offset_high;
+ u32 zero1;
+} __attribute__((packed));
+
+#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
+#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
+#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
+
+enum {
+ DESC_TSS = 0x9,
+ DESC_LDT = 0x2,
+};
+
+// LDT or TSS descriptor in the GDT. 16 bytes.
+struct ldttss_desc {
+ u16 limit0;
+ u16 base0;
+ unsigned base1 : 8, type : 5, dpl : 2, p : 1;
+ unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+ u32 base3;
+ u32 zero1;
+} __attribute__((packed));
+
+struct desc_ptr {
+ unsigned short size;
+ unsigned long address;
+} __attribute__((packed)) ;
+
+extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
+
+extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+
+#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)(cpu_gdt_descr[(_cpu)].address))
+
+#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
+#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
+
+static inline void clear_LDT(void)
+{
+ int cpu = get_cpu();
+
+ /*
+ * NB. We load the default_ldt for lcall7/27 handling on demand, as
+ * it slows down context switching. No one uses it anyway.
+ */
+ cpu = cpu; /* XXX avoid compiler warning */
+ xen_set_ldt(0UL, 0);
+ put_cpu();
+}
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern struct gate_struct idt_table[];
+
+static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
+{
+ struct gate_struct s;
+ s.offset_low = PTR_LOW(func);
+ s.segment = __KERNEL_CS;
+ s.ist = ist;
+ s.p = 1;
+ s.dpl = dpl;
+ s.zero0 = 0;
+ s.zero1 = 0;
+ s.type = type;
+ s.offset_middle = PTR_MIDDLE(func);
+ s.offset_high = PTR_HIGH(func);
+ /* does not need to be atomic because it is only done once at setup time */
+ memcpy(adr, &s, 16);
+}
+
+static inline void set_intr_gate(int nr, void *func)
+{
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
+}
+
+static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
+{
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
+}
+
+static inline void set_system_gate(int nr, void *func)
+{
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
+}
+
+static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
+ unsigned size)
+{
+ struct ldttss_desc d;
+ memset(&d,0,sizeof(d));
+ d.limit0 = size & 0xFFFF;
+ d.base0 = PTR_LOW(tss);
+ d.base1 = PTR_MIDDLE(tss) & 0xFF;
+ d.type = type;
+ d.p = 1;
+ d.limit1 = (size >> 16) & 0xF;
+ d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
+ d.base3 = PTR_HIGH(tss);
+ memcpy(ptr, &d, 16);
+}
+
+static inline void set_tss_desc(unsigned cpu, void *addr)
+{
+ set_tssldt_descriptor((struct ldttss_desc *)&get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS],
+ (unsigned long)addr,
+ DESC_TSS,
+ sizeof(struct tss_struct) - 1);
+}
+
+static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
+{
+ set_tssldt_descriptor((struct ldttss_desc *)&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
+ (unsigned long)addr,
+ DESC_LDT, size * 8 - 1);
+}
+
+static inline void set_seg_base(unsigned cpu, int entry, void *base)
+{
+ struct desc_struct *d = (struct desc_struct *)&get_cpu_gdt_table(cpu)[entry];
+ u32 addr = (u32)(u64)base;
+ BUG_ON((u64)base >> 32);
+ d->base0 = addr & 0xffff;
+ d->base1 = (addr >> 16) & 0xff;
+ d->base2 = (addr >> 24) & 0xff;
+}
+
+#define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+/* Don't allow setting of the lm bit. It is useless anyway because
+ 64bit system calls require __USER_CS. */
+#define LDT_entry_b(info) \
+ (((info)->base_addr & 0xff000000) | \
+ (((info)->base_addr & 0x00ff0000) >> 16) | \
+ ((info)->limit & 0xf0000) | \
+ (((info)->read_exec_only ^ 1) << 9) | \
+ ((info)->contents << 10) | \
+ (((info)->seg_not_present ^ 1) << 15) | \
+ ((info)->seg_32bit << 22) | \
+ ((info)->limit_in_pages << 23) | \
+ ((info)->useable << 20) | \
+ /* ((info)->lm << 21) | */ \
+ 0x7000)
+
+#define LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 && \
+ (info)->lm == 0)
+
+#if TLS_SIZE != 24
+# error update this code.
+#endif
+
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+{
+#if 0
+ u64 *gdt = (u64 *)(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN);
+ gdt[0] = t->tls_array[0];
+ gdt[1] = t->tls_array[1];
+ gdt[2] = t->tls_array[2];
+#endif
+#define C(i) \
+ HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), t->tls_array[i])
+
+ C(0); C(1); C(2);
+#undef C
+}
+
+/*
+ * load one particular LDT into the current CPU
+ */
+extern inline void load_LDT_nolock (mm_context_t *pc, int cpu)
+{
+ void *segments = pc->ldt;
+ int count = pc->size;
+
+ if (likely(!count))
+ segments = NULL;
+
+ xen_set_ldt((unsigned long)segments, count);
+}
+
+static inline void load_LDT(mm_context_t *pc)
+{
+ int cpu = get_cpu();
+ load_LDT_nolock(pc, cpu);
+ put_cpu();
+}
+
+extern struct desc_ptr idt_descr;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
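
Because the guest's GDT pages are registered with, and write-protected by, Xen, load_TLS() above cannot store descriptors directly (the #if 0 block preserves the native version); it instead issues one HYPERVISOR_update_descriptor per TLS slot, addressed by the descriptor's machine address. The C(0); C(1); C(2) macro expansion, rewritten as a loop for readability (hypothetical form, kernel context assumed):

    /* Hypothetical loop form of the C(i) expansion in load_TLS() above. */
    static inline void load_tls_slots(struct thread_struct *t, unsigned int cpu)
    {
        unsigned int i;

        for (i = 0; i < 3; i++)   /* TLS_SIZE == 24 bytes -> 3 descriptors */
            HYPERVISOR_update_descriptor(
                virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]),
                t->tls_array[i]);
    }
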
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/dma-mapping.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/dma-mapping.h
new file mode 100644
index 0000000000..a646e3f863
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/dma-mapping.h
@@ -0,0 +1,136 @@
+#ifndef _X8664_DMA_MAPPING_H
+#define _X8664_DMA_MAPPING_H 1
+
+/*
+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
+ * documentation.
+ */
+
+#include <linux/config.h>
+
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+#include <asm/swiotlb.h>
+
+extern dma_addr_t bad_dma_address;
+#define dma_mapping_error(x) \
+ (swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address))
+
+void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ unsigned gfp);
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+
+#ifdef CONFIG_GART_IOMMU
+
+extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size,
+ int direction);
+extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
+ int direction);
+
+#else
+
+/* No IOMMU */
+
+static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
+ size_t size, int direction)
+{
+ dma_addr_t addr;
+
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+ addr = virt_to_phys(ptr);
+
+ if ((addr+size) & ~*hwdev->dma_mask)
+ out_of_line_bug();
+ return phys_to_machine(addr);
+}
+
+static inline void dma_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+ /* Nothing to do */
+}
+#endif
+
+#define dma_map_page(dev,page,offset,size,dir) \
+ dma_map_single((dev), page_address(page)+(offset), (size), (dir))
+
+static inline void dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+
+ if (swiotlb)
+ return swiotlb_sync_single_for_cpu(hwdev,dma_handle,size,direction);
+
+ flush_write_buffers();
+}
+
+static inline void dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+
+ if (swiotlb)
+ return swiotlb_sync_single_for_device(hwdev,dma_handle,size,direction);
+
+ flush_write_buffers();
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *hwdev,
+ struct scatterlist *sg,
+ int nelems, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+
+ if (swiotlb)
+ return swiotlb_sync_sg_for_cpu(hwdev,sg,nelems,direction);
+
+ flush_write_buffers();
+}
+
+static inline void dma_sync_sg_for_device(struct device *hwdev,
+ struct scatterlist *sg,
+ int nelems, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+
+ if (swiotlb)
+ return swiotlb_sync_sg_for_device(hwdev,sg,nelems,direction);
+
+ flush_write_buffers();
+}
+
+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+
+#define dma_unmap_page dma_unmap_single
+
+extern int dma_supported(struct device *hwdev, u64 mask);
+extern int dma_get_cache_alignment(void);
+#define dma_is_consistent(h) 1
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
+}
+
+static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+{
+ flush_write_buffers();
+}
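
Taken together, a minimal driver-style sketch of this streaming API (the device, buffer and length are hypothetical; so is the use of -EIO, and error handling is elided):

    /* Hypothetical sketch: stream a buffer to a 32-bit-capable device. */
    static int example_tx(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t bus;

        if (dma_set_mask(dev, 0xffffffffULL))    /* device addressing limit */
            return -EIO;
        bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(bus))
            return -EIO;
        /* ... hand 'bus' to the device, wait for the transfer ... */
        dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
        return 0;
    }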
+#endif
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/fixmap.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/fixmap.h
new file mode 100644
index 0000000000..4c420f01ee
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/fixmap.h
@@ -0,0 +1,108 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/apicdef.h>
+#include <asm-xen/gnttab.h>
+#include <asm/page.h>
+#include <asm/vsyscall.h>
+#include <asm/vsyscall32.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use set_fixmap(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+enum fixed_addresses {
+ VSYSCALL_LAST_PAGE,
+ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+ VSYSCALL_HPET,
+ FIX_HPET_BASE,
+#ifdef CONFIG_X86_LOCAL_APIC
+ FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ FIX_IO_APIC_BASE_0,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+#endif
+ FIX_SHARED_INFO,
+ FIX_GNTTAB_BEGIN,
+ FIX_GNTTAB_END = FIX_GNTTAB_BEGIN + NR_GRANT_FRAMES - 1,
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+#define NR_FIX_ISAMAPS 256
+ FIX_ISAMAP_END,
+ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
+#endif
+ __end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, __pgprot(0))
+
+#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
+#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
+#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+extern inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /*
+ * this branch gets completely eliminated after inlining,
+ * except when someone tries to use fixaddr indices in an
+ * illegal way (such as mixing up address types or using
+ * out-of-range indices).
+ *
+ * If it doesn't get removed, the linker will complain
+ * loudly with a reasonably clear error message..
+ */
+ if (idx >= __end_of_fixed_addresses)
+ __this_fixmap_does_not_exist();
+
+ return __fix_to_virt(idx);
+}
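
A usage sketch: pin a frame at its compile-time slot and recover the constant virtual address (the machine address passed in is a hypothetical placeholder):

    static void __init example_map_shared_info(unsigned long machine_addr)
    {
        set_fixmap(FIX_SHARED_INFO, machine_addr);   /* bind physical frame */
        printk("shared info mapped at %lx\n",
               fix_to_virt(FIX_SHARED_INFO));        /* folds to a constant */
    }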
+
+#endif
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/floppy.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/floppy.h
new file mode 100644
index 0000000000..dd6a76bc06
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/floppy.h
@@ -0,0 +1,204 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ *
+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
+ */
+#ifndef __ASM_XEN_X86_64_FLOPPY_H
+#define __ASM_XEN_X86_64_FLOPPY_H
+
+#include <linux/vmalloc.h>
+
+
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * Went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ */
+#define _CROSS_64KB(a,s,vdma) \
+(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+
+/* XEN: Hit DMA paths on the head. This trick is from asm-m68k/floppy.h. */
+#include <asm/dma.h>
+#undef MAX_DMA_ADDRESS
+#define MAX_DMA_ADDRESS 0
+#define CROSS_64KB(a,s) (0)
+
+#define fd_inb(port) inb_p(port)
+#define fd_outb(value,port) outb_p(value,port)
+
+#define fd_request_dma() (0)
+#define fd_free_dma() ((void)0)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
+#define fd_get_dma_residue() vdma_get_dma_residue(FLOPPY_DMA)
+#define fd_dma_mem_alloc(size) vdma_mem_alloc(size)
+#define fd_dma_mem_free(addr, size) vdma_mem_free(addr, size)
+#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
+
+static int virtual_dma_count;
+static int virtual_dma_residue;
+static char *virtual_dma_addr;
+static int virtual_dma_mode;
+static int doing_pdma;
+
+static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
+{
+ register unsigned char st;
+
+#undef TRACE_FLPY_INT
+
+#ifdef TRACE_FLPY_INT
+ static int calls=0;
+ static int bytes=0;
+ static int dma_wait=0;
+#endif
+ if (!doing_pdma)
+ return floppy_interrupt(irq, dev_id, regs);
+
+#ifdef TRACE_FLPY_INT
+ if(!calls)
+ bytes = virtual_dma_count;
+#endif
+
+ {
+ register int lcount;
+ register char *lptr;
+
+ st = 1;
+ for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
+ lcount; lcount--, lptr++) {
+ st=inb(virtual_dma_port+4) & 0xa0 ;
+ if(st != 0xa0)
+ break;
+ if(virtual_dma_mode)
+ outb_p(*lptr, virtual_dma_port+5);
+ else
+ *lptr = inb_p(virtual_dma_port+5);
+ }
+ virtual_dma_count = lcount;
+ virtual_dma_addr = lptr;
+ st = inb(virtual_dma_port+4);
+ }
+
+#ifdef TRACE_FLPY_INT
+ calls++;
+#endif
+ if(st == 0x20)
+ return IRQ_HANDLED;
+ if(!(st & 0x20)) {
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+#ifdef TRACE_FLPY_INT
+ printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+ virtual_dma_count, virtual_dma_residue, calls, bytes,
+ dma_wait);
+ calls = 0;
+ dma_wait=0;
+#endif
+ doing_pdma = 0;
+ floppy_interrupt(irq, dev_id, regs);
+ return IRQ_HANDLED;
+ }
+#ifdef TRACE_FLPY_INT
+ if(!virtual_dma_count)
+ dma_wait++;
+#endif
+ return IRQ_HANDLED;
+}
+
+static void fd_disable_dma(void)
+{
+ doing_pdma = 0;
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+}
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{
+ return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int fd_request_irq(void)
+{
+ return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
+ "floppy", NULL);
+}
+
+
+static unsigned long vdma_mem_alloc(unsigned long size)
+{
+ return (unsigned long) vmalloc(size);
+}
+
+static void vdma_mem_free(unsigned long addr, unsigned long size)
+{
+ vfree((void *)addr);
+}
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+ doing_pdma = 1;
+ virtual_dma_port = io;
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
+ virtual_dma_addr = addr;
+ virtual_dma_count = size;
+ virtual_dma_residue = 0;
+ return 0;
+}
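
Putting the pieces together, a flow sketch (the caller, buffer and port base are hypothetical): fd_dma_setup() maps to vdma_dma_setup() above and merely records the transfer; floppy_hardint() then drains the controller FIFO by port I/O on each interrupt.

    /* Arm a 512-byte pseudo-DMA read; no real DMA channel is programmed. */
    static void example_arm_read(char *sector_buf, int fdc_io_base)
    {
        fd_dma_setup(sector_buf, 512, DMA_MODE_READ, fdc_io_base);
        /* FLOPPY_IRQ now fires; floppy_hardint() moves one byte per
         * FIFO poll via inb_p(io+5) until the count is drained. */
    }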
+
+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
+#define FDC1 xen_floppy_init()
+static int FDC2 = -1;
+
+static int xen_floppy_init(void)
+{
+ use_virtual_dma = 1;
+ can_use_virtual_dma = 1;
+ return 0x340;
+}
+
+/*
+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
+ * coincides with another rtc CMOS user. Paul G.
+ */
+#define FLOPPY0_TYPE ({ \
+ unsigned long flags; \
+ unsigned char val; \
+ spin_lock_irqsave(&rtc_lock, flags); \
+ val = (CMOS_READ(0x10) >> 4) & 15; \
+ spin_unlock_irqrestore(&rtc_lock, flags); \
+ val; \
+})
+
+#define FLOPPY1_TYPE ({ \
+ unsigned long flags; \
+ unsigned char val; \
+ spin_lock_irqsave(&rtc_lock, flags); \
+ val = CMOS_READ(0x10) & 15; \
+ spin_unlock_irqrestore(&rtc_lock, flags); \
+ val; \
+})
+
+#define N_FDC 2
+#define N_DRIVE 8
+
+#define FLOPPY_MOTOR_MASK 0xf0
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __ASM_XEN_X86_64_FLOPPY_H */
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h
new file mode 100644
index 0000000000..e57c54769a
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h
@@ -0,0 +1,505 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+/*
+ * Benjamin Liu <benjamin.liu@intel.com>
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Ported to x86-64.
+ *
+ */
+
+#ifndef __HYPERCALL_H__
+#define __HYPERCALL_H__
+#include <asm-xen/xen-public/xen.h>
+
+#define __syscall_clobber "r11","rcx","memory"
+
+/*
+ * Assembler stubs for hypercalls.
+ */
+static inline int
+HYPERVISOR_set_trap_table(
+ trap_info_t *table)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_trap_table), "D" (table)
+ : __syscall_clobber );
+
+ return ret;
+}
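
Every stub below follows the same convention: the hypercall number is loaded into %rax, up to five arguments go in %rdi, %rsi, %rdx, %r10 and %r8 (the syscall ABI, with %r10 standing in for %rcx, which TRAP_INSTR clobbers), and the result comes back in %rax. A hypothetical condensed form of the two-argument case, not part of this header:

    /* Illustration only: the pattern the longhand stubs repeat. */
    #define _hypercall2(name, a1, a2)                               \
    ({                                                              \
        long __res;                                                 \
        __asm__ __volatile__ (                                      \
            TRAP_INSTR                                              \
            : "=a" (__res)                                          \
            : "0" ((unsigned long)__HYPERVISOR_##name),             \
              "D" ((unsigned long)(a1)), "S" ((unsigned long)(a2))  \
            : __syscall_clobber );                                  \
        __res;                                                      \
    })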
+
+static inline int
+HYPERVISOR_mmu_update(
+ mmu_update_t *req, int count, int *success_count, domid_t domid)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ "movq %5, %%r10;" TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_mmu_update), "D" (req), "S" ((long)count),
+ "d" (success_count), "g" ((unsigned long)domid)
+ : __syscall_clobber, "r10" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_mmuext_op(
+ struct mmuext_op *op, int count, int *success_count, domid_t domid)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ "movq %5, %%r10;" TRAP_INSTR
+ : "=a" (ret)
+ : "0" (__HYPERVISOR_mmuext_op), "D" (op), "S" ((long)count),
+ "d" (success_count), "g" ((unsigned long)domid)
+ : __syscall_clobber, "r10" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_gdt(
+ unsigned long *frame_list, int entries)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_gdt), "D" (frame_list), "S" ((long)entries)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_stack_switch(
+ unsigned long ss, unsigned long esp)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_stack_switch), "D" (ss), "S" (esp)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+ unsigned long event_address, unsigned long failsafe_address,
+ unsigned long syscall_address)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_callbacks), "D" (event_address),
+ "S" (failsafe_address), "d" (syscall_address)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+ int set)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" ((unsigned long)__HYPERVISOR_fpu_taskswitch),
+ "D" ((unsigned long) set) : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_yield(
+ void)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op), "D" ((unsigned long)SCHEDOP_yield)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_block(
+ void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op), "D" ((unsigned long)SCHEDOP_block)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_shutdown(
+ void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op),
+ "D" ((unsigned long)(SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift)))
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_reboot(
+ void)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op),
+ "D" ((unsigned long)(SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift)))
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_suspend(
+ unsigned long srec)
+{
+ int ret;
+
+ /* NB. On suspend, control software expects a suspend record in %esi. */
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op),
+ "D" ((unsigned long)(SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift))),
+ "S" (srec)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+/*
+ * We can have the timeout value in a single argument for the hypercall, but
+ * that will break the common code.
+ */
+static inline long
+HYPERVISOR_set_timer_op(
+ u64 timeout)
+{
+ long ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_timer_op),
+ "D" (timeout)
+ : __syscall_clobber );
+
+ return ret;
+}
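
A usage sketch (the caller's notion of current system time, 'now_ns', is hypothetical):

    /* Request a timer event one millisecond from now. */
    static inline void example_arm_timer(u64 now_ns)
    {
        HYPERVISOR_set_timer_op(now_ns + 1000000ULL);
    }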
+
+static inline int
+HYPERVISOR_dom0_op(
+ dom0_op_t *dom0_op)
+{
+ int ret;
+
+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_dom0_op), "D" (dom0_op)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_debugreg(
+ int reg, unsigned long value)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_debugreg), "D" ((unsigned long)reg), "S" (value)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+ int reg)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_get_debugreg), "D" ((unsigned long)reg)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+ unsigned long ma, unsigned long word)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_update_descriptor), "D" (ma),
+ "S" (word)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_dom_mem_op(
+ unsigned int op, unsigned long *extent_list,
+ unsigned long nr_extents, unsigned int extent_order)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ "movq %5,%%r10; movq %6,%%r8;" TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_dom_mem_op), "D" ((unsigned long)op), "S" (extent_list),
+ "d" (nr_extents), "g" ((unsigned long) extent_order), "g" ((unsigned long) DOMID_SELF)
+ : __syscall_clobber,"r8","r10");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_multicall(
+ void *call_list, int nr_calls)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_multicall), "D" (call_list), "S" ((unsigned long)nr_calls)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(
+ unsigned long page_nr, pte_t new_val, unsigned long flags)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_update_va_mapping),
+ "D" (page_nr), "S" (new_val.pte), "d" (flags)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_event_channel_op(
+ void *op)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_event_channel_op), "D" (op)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_xen_version(
+ int cmd)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_xen_version), "D" ((unsigned long)cmd)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_console_io(
+ int cmd, int count, char *str)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_console_io), "D" ((unsigned long)cmd), "S" ((unsigned long)count), "d" (str)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_physdev_op(
+ void *physdev_op)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_physdev_op), "D" (physdev_op)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_grant_table_op(
+ unsigned int cmd, void *uop, unsigned int count)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_grant_table_op), "D" ((unsigned long)cmd), "S" ((unsigned long)uop), "d" (count)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+ unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ "movq %5, %%r10;" TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_update_va_mapping_otherdomain),
+ "D" (page_nr), "S" (new_val.pte), "d" (flags), "g" ((unsigned long)domid)
+ : __syscall_clobber,"r10");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_vm_assist(
+ unsigned int cmd, unsigned int type)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_vm_assist), "D" ((unsigned long)cmd), "S" ((unsigned long)type)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_switch_to_user(void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" ((unsigned long)__HYPERVISOR_switch_to_user) : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_boot_vcpu(
+ unsigned long vcpu, vcpu_guest_context_t *ctxt)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" (__HYPERVISOR_boot_vcpu), "D" (vcpu), "S" (ctxt)
+ : __syscall_clobber);
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_segment_base(
+ int reg, unsigned long value)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_segment_base), "D" ((unsigned long)reg), "S" (value)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+#endif /* __HYPERCALL_H__ */
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/io.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/io.h
new file mode 100644
index 0000000000..90466f397b
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/io.h
@@ -0,0 +1,365 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/config.h>
+#include <asm/fixmap.h>
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated,
+ * because it tries to (a) handle it all in a way that lets gcc
+ * optimize it as well as possible and (b) avoid writing the same
+ * thing over and over again with slight variations, possibly making
+ * a mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+ /*
+ * Bit simplified and optimized by Jan Hubicka
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+ *
+ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+ * isa_read[wl] and isa_write[wl] fixed
+ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ */
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
+#else
+#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
+#endif
+
+#ifdef REALLY_SLOW_IO
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#else
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+
+/*
+ * Talk about misusing macros..
+ */
+#define __OUT1(s,x) \
+extern inline void out##s(unsigned x value, unsigned short port) {
+
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+
+#define __OUT(s,s1,x) \
+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
+__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
+
+#define __IN1(s) \
+extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+
+#define __IN(s,s1,i...) \
+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+
+#define __INS(s) \
+extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; ins" #s \
+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define __OUTS(s) \
+extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; outs" #s \
+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define RETURN_TYPE unsigned char
+__IN(b,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned short
+__IN(w,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned int
+__IN(l,"")
+#undef RETURN_TYPE
+
+__OUT(b,"b",char)
+__OUT(w,"w",short)
+__OUT(l,,int)
+
+__INS(b)
+__INS(w)
+__INS(l)
+
+__OUTS(b)
+__OUTS(w)
+__OUTS(l)
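
To make the macro machinery concrete, this is what __IN(b,"") expands to for the plain variant, hand-reformatted (the _p variant is identical except that __FULL_SLOW_DOWN_IO is appended to the asm template):

    extern inline unsigned char inb(unsigned short port)
    {
        unsigned char _v;
        __asm__ __volatile__ ("inb %w1,%0" : "=a" (_v) : "Nd" (port));
        return _v;
    }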
+
+#define IO_SPACE_LIMIT 0xffff
+
+#if defined(__KERNEL__) && __x86_64__
+
+#include <linux/vmalloc.h>
+
+#ifndef __i386__
+/*
+ * Change virtual addresses to physical addresses and vice versa.
+ * These are pretty trivial.
+ */
+extern inline unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa(address);
+}
+
+extern inline void * phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+
+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
+#define bus_to_virt(_x) __va(machine_to_phys(_x))
+#endif
+
+/*
+ * Change "struct page" to physical address.
+ */
+#ifdef CONFIG_DISCONTIGMEM
+#include <asm/mmzone.h>
+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
+
+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
+ (unsigned long) bio_offset((bio)))
+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
+ (unsigned long) (bv)->bv_offset)
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
+ bvec_to_pseudophys((vec2))))
+#else
+// #define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
+
+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
+ (unsigned long) bio_offset((bio)))
+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
+ (unsigned long) (bv)->bv_offset)
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
+ bvec_to_pseudophys((vec2))))
+#endif
+
+#include <asm-generic/iomap.h>
+
+extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+extern inline void __iomem * ioremap (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, 0);
+}
+
+/*
+ * This one maps high address device memory and turns off caching for that area.
+ * It's useful if some control registers are in such an area and write combining
+ * or read caching is not desirable:
+ */
+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
+extern void iounmap(volatile void __iomem *addr);
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+
+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
+#else
+#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
+#endif
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
+#define bus_to_virt(_x) __va(machine_to_phys(_x))
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+
+static inline __u8 __readb(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u8 *)addr;
+}
+static inline __u16 __readw(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u16 *)addr;
+}
+static inline __u32 __readl(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u32 *)addr;
+}
+static inline __u64 __readq(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u64 *)addr;
+}
+#define readb(x) __readb(x)
+#define readw(x) __readw(x)
+#define readl(x) __readl(x)
+#define readq(x) __readq(x)
+#define readb_relaxed(a) readb(a)
+#define readw_relaxed(a) readw(a)
+#define readl_relaxed(a) readl(a)
+#define readq_relaxed(a) readq(a)
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+#define __raw_readq readq
+
+#define mmiowb()
+
+#ifdef CONFIG_UNORDERED_IO
+static inline void __writel(__u32 val, volatile void __iomem *addr)
+{
+ volatile __u32 __iomem *target = addr;
+ asm volatile("movnti %1,%0"
+ : "=m" (*target)
+ : "r" (val) : "memory");
+}
+
+static inline void __writeq(__u64 val, volatile void __iomem *addr)
+{
+ volatile __u64 __iomem *target = addr;
+ asm volatile("movnti %1,%0"
+ : "=m" (*target)
+ : "r" (val) : "memory");
+}
+#else
+static inline void __writel(__u32 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u32 *)addr = b;
+}
+static inline void __writeq(__u64 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u64 *)addr = b;
+}
+#endif
+static inline void __writeb(__u8 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u8 *)addr = b;
+}
+static inline void __writew(__u16 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u16 *)addr = b;
+}
+#define writeq(val,addr) __writeq((val),(addr))
+#define writel(val,addr) __writel((val),(addr))
+#define writew(val,addr) __writew((val),(addr))
+#define writeb(val,addr) __writeb((val),(addr))
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+#define __raw_writeq writeq
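
A usage sketch tying these together (the BAR address and register offsets are hypothetical):

    static void example_mmio(unsigned long bar_phys)
    {
        void __iomem *regs = ioremap_nocache(bar_phys, 0x1000);
        __u32 status;

        if (!regs)
            return;
        status = readl(regs + 0x04);      /* hypothetical status register */
        writel(status | 1, regs + 0x04);  /* set a hypothetical enable bit */
        iounmap(regs);
    }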
+
+void __memcpy_fromio(void*,unsigned long,unsigned);
+void __memcpy_toio(unsigned long,const void*,unsigned);
+
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
+{
+ __memcpy_fromio(to,(unsigned long)from,len);
+}
+static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
+{
+ __memcpy_toio((unsigned long)to,from,len);
+}
+
+void memset_io(volatile void __iomem *a, int b, size_t c);
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI is quite large):
+ */
+#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
+
+#define isa_readb(a) readb(__ISA_IO_base + (a))
+#define isa_readw(a) readw(__ISA_IO_base + (a))
+#define isa_readl(a) readl(__ISA_IO_base + (a))
+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
+#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
+#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
+#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
+
+
+/*
+ * Again, x86-64 does not require memory-mapped IO specific functions.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
+#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d))
+
+/**
+ * check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the mmio address io_addr. This
+ * address should have been obtained by ioremap.
+ * Returns 1 on a match.
+ */
+
+static inline int check_signature(void __iomem *io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+
+#define flush_write_buffers()
+
+extern int iommu_bio_merge;
+#define BIO_VMERGE_BOUNDARY iommu_bio_merge
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/irq.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/irq.h
new file mode 100644
index 0000000000..ccd85685fd
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/irq.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+/*
+ * linux/include/asm/irq.h
+ *
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke
+ * <tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+/* include comes from machine specific directory */
+#include "irq_vectors.h"
+#include <asm/thread_info.h>
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ return ((irq == 2) ? 9 : irq);
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
+#endif
+
+#define KDB_VECTOR 0xf9
+
+# define irq_ctx_init(cpu) do { } while (0)
+
+struct irqaction;
+struct pt_regs;
+int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
+
+#endif /* _ASM_IRQ_H */
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/io_ports.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/io_ports.h
new file mode 100644
index 0000000000..a96d9f6604
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/io_ports.h
@@ -0,0 +1,30 @@
+/*
+ * arch/i386/mach-generic/io_ports.h
+ *
+ * Machine specific IO port address definition for generic.
+ * Written by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_IO_PORTS_H
+#define _MACH_IO_PORTS_H
+
+/* i8253A PIT registers */
+#define PIT_MODE 0x43
+#define PIT_CH0 0x40
+#define PIT_CH2 0x42
+
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD 0x20
+#define PIC_MASTER_IMR 0x21
+#define PIC_MASTER_ISR PIC_MASTER_CMD
+#define PIC_MASTER_POLL PIC_MASTER_ISR
+#define PIC_MASTER_OCW3 PIC_MASTER_ISR
+#define PIC_SLAVE_CMD 0xa0
+#define PIC_SLAVE_IMR 0xa1
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR 2
+#define MASTER_ICW4_DEFAULT 0x01
+#define SLAVE_ICW4_DEFAULT 0x01
+#define PIC_ICW4_AEOI 2
+
+#endif /* !_MACH_IO_PORTS_H */
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h
new file mode 100644
index 0000000000..dd28b49175
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h
@@ -0,0 +1,137 @@
+/*
+ * This file should contain #defines for all of the interrupt vector
+ * numbers used by this architecture.
+ *
+ * In addition, there are some standard defines:
+ *
+ * FIRST_EXTERNAL_VECTOR:
+ * The first free place for external interrupts
+ *
+ * SYSCALL_VECTOR:
+ * The vector under which a syscall makes the user-to-kernel
+ * transition.
+ *
+ * TIMER_IRQ:
+ * The IRQ number the timer interrupt comes in at.
+ *
+ * NR_IRQS:
+ * The total number of interrupt vectors (including all the
+ * architecture specific interrupts) needed.
+ *
+ */
+#ifndef _ASM_IRQ_VECTORS_H
+#define _ASM_IRQ_VECTORS_H
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR 0x20
+
+#define SYSCALL_VECTOR 0x80
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+#if 0
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ * some of the following vectors are 'rare', they are merged
+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ * TLB, reschedule and local APIC vectors are performance-critical.
+ *
+ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
+ */
+#define SPURIOUS_APIC_VECTOR 0xff
+#define ERROR_APIC_VECTOR 0xfe
+#define INVALIDATE_TLB_VECTOR 0xfd
+#define RESCHEDULE_VECTOR 0xfc
+#define CALL_FUNCTION_VECTOR 0xfb
+
+#define THERMAL_APIC_VECTOR 0xf0
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR 0xef
+#endif
+
+/*
+ * First APIC vector available to drivers: (vectors 0x30-0xee)
+ * we start at 0x31 to spread out vectors evenly between priority
+ * levels. (0x80 is the syscall vector)
+ */
+#define FIRST_DEVICE_VECTOR 0x31
+#define FIRST_SYSTEM_VECTOR 0xef
+
+/*
+ * 16 8259A IRQs, 208 potential APIC interrupt sources.
+ * Right now the APIC is mostly only used for SMP.
+ * 256 vectors is an architectural limit. (we can have
+ * more than 256 devices theoretically, but they will
+ * have to use shared interrupts)
+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
+ * the usable vector space is 0x20-0xff (224 vectors)
+ */
+
+#define NR_IPIS 8
+
+#define RESCHEDULE_VECTOR 1
+#define INVALIDATE_TLB_VECTOR 2
+#define CALL_FUNCTION_VECTOR 3
+
+/*
+ * The maximum number of vectors supported by i386 processors
+ * is limited to 256. For processors other than i386, NR_VECTORS
+ * should be changed accordingly.
+ */
+#define NR_VECTORS 256
+
+#define FPU_IRQ 13
+
+#define FIRST_VM86_IRQ 3
+#define LAST_VM86_IRQ 15
+#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
+
+/*
+ * The flat IRQ space is divided into two regions:
+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
+ * if we have physical device-access privilege. This region is at the
+ * start of the IRQ space so that existing device drivers do not need
+ * to be modified to translate physical IRQ numbers into our IRQ space.
+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
+ * are bound using the provided bind/unbind functions.
+ */
+
+#define PIRQ_BASE 0
+#define NR_PIRQS 256
+
+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
+#define NR_DYNIRQS 256
+
+#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
+#define NR_IRQ_VECTORS NR_IRQS
+
+#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
+#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
+
+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
+
+#ifndef __ASSEMBLY__
+/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
+extern int bind_virq_to_irq(int virq);
+extern void unbind_virq_from_irq(int virq);
+extern int bind_ipi_on_cpu_to_irq(int cpu, int ipi);
+extern void unbind_ipi_on_cpu_from_irq(int cpu, int ipi);
+extern int bind_evtchn_to_irq(int evtchn);
+extern void unbind_evtchn_from_irq(int evtchn);
+
+extern void irq_suspend(void);
+extern void irq_resume(void);
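
A binding sketch for the dynamic region (VIRQ_TIMER comes from the Xen public headers; the handler name is hypothetical):

    static int example_bind_timer(void)
    {
        int irq = bind_virq_to_irq(VIRQ_TIMER);  /* lands in DYNIRQ space */
        return request_irq(irq, example_timer_handler, SA_INTERRUPT,
                           "xen-timer", NULL);
    }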
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_time.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_time.h
new file mode 100644
index 0000000000..b749aa44a8
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_time.h
@@ -0,0 +1,122 @@
+/*
+ * include/asm-i386/mach-default/mach_time.h
+ *
+ * Machine specific set RTC function for generic.
+ * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_TIME_H
+#define _MACH_TIME_H
+
+#include <linux/mc146818rtc.h>
+
+/* for checking the timing of the set_rtc_mmss() call: 500 ms */
+/* used in arch/i386/time.c::do_timer_interrupt() */
+#define USEC_AFTER 500000
+#define USEC_BEFORE 500000
+
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ *
+ * BUG: This routine does not handle hour overflow properly; it just
+ * sets the minutes. Usually you'll only notice that after reboot!
+ */
+static inline int mach_set_rtc_mmss(unsigned long nowtime)
+{
+ int retval = 0;
+ int real_seconds, real_minutes, cmos_minutes;
+ unsigned char save_control, save_freq_select;
+
+ save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ cmos_minutes = CMOS_READ(RTC_MINUTES);
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ BCD_TO_BIN(cmos_minutes);
+
+ /*
+ * since we're only adjusting minutes and seconds,
+ * don't interfere with hour overflow. This avoids
+ * messing with unknown time zones but requires your
+ * RTC not to be off by more than 15 minutes
+ */
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+ real_minutes += 30; /* correct for half hour time zone */
+ real_minutes %= 60;
+
+ if (abs(real_minutes - cmos_minutes) < 30) {
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ BIN_TO_BCD(real_seconds);
+ BIN_TO_BCD(real_minutes);
+ }
+ CMOS_WRITE(real_seconds,RTC_SECONDS);
+ CMOS_WRITE(real_minutes,RTC_MINUTES);
+ } else {
+ printk(KERN_WARNING
+ "set_rtc_mmss: can't update from %d to %d\n",
+ cmos_minutes, real_minutes);
+ retval = -1;
+ }
+
+ /* The following flags have to be released exactly in this order,
+ * otherwise the DS12887 (popular MC146818A clone with integrated
+ * battery and quartz) will not reset the oscillator and will not
+ * update precisely 500 ms later. You won't find this mentioned in
+ * the Dallas Semiconductor data sheets, but who believes data
+ * sheets anyway ... -- Markus Kuhn
+ */
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+ return retval;
+}
+
+static inline unsigned long mach_get_cmos_time(void)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ int i;
+
+ /* The Linux interpretation of the CMOS clock register contents:
+ * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
+ * RTC registers show the second which has precisely just started.
+ * Let's hope other operating systems interpret the RTC the same way.
+ */
+ /* read RTC exactly on falling edge of update flag */
+ for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
+ break;
+ for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
+ if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+ break;
+ do { /* Isn't this overkill? UIP above should guarantee consistency */
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
+ } while (sec != CMOS_READ(RTC_SECONDS));
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ {
+ BCD_TO_BIN(sec);
+ BCD_TO_BIN(min);
+ BCD_TO_BIN(hour);
+ BCD_TO_BIN(day);
+ BCD_TO_BIN(mon);
+ BCD_TO_BIN(year);
+ }
+ if ((year += 1900) < 1970)
+ year += 100;
+
+ return mktime(year, mon, day, hour, min, sec);
+}
+
+#endif /* !_MACH_TIME_H */
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h
new file mode 100644
index 0000000000..4b9703bb02
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h
@@ -0,0 +1,48 @@
+/*
+ * include/asm-i386/mach-default/mach_timer.h
+ *
+ * Machine specific calibrate_tsc() for generic.
+ * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+/* ------ Calibrate the TSC -------
+ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
+ * Too much 64-bit arithmetic here to do this cleanly in C, and for
+ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
+ * output busy loop as low as possible. We avoid reading the CTC registers
+ * directly because of the awkward 8-bit access mechanism of the 82C54
+ * device.
+ */
+#ifndef _MACH_TIMER_H
+#define _MACH_TIMER_H
+
+#define CALIBRATE_LATCH (5 * LATCH)
+
+static inline void mach_prepare_counter(void)
+{
+ /* Set the Gate high, disable speaker */
+ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+ /*
+ * Now let's take care of CTC channel 2
+ *
+ * Set the Gate high, program CTC channel 2 for mode 0,
+ * (interrupt on terminal count mode), binary count,
+ * load 5 * LATCH count, (LSB and MSB) to begin countdown.
+ *
+ * Some devices need a delay here.
+ */
+ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
+ outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
+ outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
+}
+
+static inline void mach_countup(unsigned long *count_p)
+{
+ unsigned long count = 0;
+ do {
+ count++;
+ } while ((inb_p(0x61) & 0x20) == 0);
+ *count_p = count;
+}
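
The two helpers are used back to back; a calibration sketch in the spirit of the timer_tsc code (rdtscll is the kernel's 64-bit TSC read; the surrounding scaling arithmetic is omitted):

    static unsigned long long example_calibrate_tsc_delta(void)
    {
        unsigned long long start, end;
        unsigned long count;

        mach_prepare_counter();   /* channel 2 starts counting down */
        rdtscll(start);
        mach_countup(&count);     /* spins until the latch expires */
        rdtscll(end);

        /* TSC cycles elapsed over CALIBRATE_LATCH PIT ticks. */
        return end - start;
    }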
+
+#endif /* !_MACH_TIMER_H */
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h
new file mode 100644
index 0000000000..5761edd144
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h
@@ -0,0 +1,47 @@
+/**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ *
+ * Description:
+ * This is included late in kernel/setup.c so that it can make
+ * use of all of the static functions.
+ **/
+
+static char * __init machine_specific_memory_setup(void)
+{
+ char *who;
+ unsigned long start_pfn, max_pfn;
+
+ who = "Xen";
+
+ start_pfn = 0;
+ max_pfn = xen_start_info.nr_pages;
+
+ e820.nr_map = 0;
+ add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(start_pfn), E820_RAM);
+
+ return who;
+}
+
+void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
+{
+ clear_bit(X86_FEATURE_VME, c->x86_capability);
+ clear_bit(X86_FEATURE_DE, c->x86_capability);
+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
+ clear_bit(X86_FEATURE_PGE, c->x86_capability);
+ clear_bit(X86_FEATURE_SEP, c->x86_capability);
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ clear_bit(X86_FEATURE_MTRR, c->x86_capability);
+}
+
+extern void hypervisor_callback(void);
+extern void failsafe_callback(void);
+
+static void __init machine_specific_arch_setup(void)
+{
+ HYPERVISOR_set_callbacks(
+ (unsigned long) hypervisor_callback,
+ (unsigned long) failsafe_callback,
+ (unsigned long) system_call);
+
+ machine_specific_modify_cpu_capabilities(&boot_cpu_data);
+}
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h
new file mode 100644
index 0000000000..b18df6896c
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h
@@ -0,0 +1,5 @@
+/* Hook to call BIOS initialisation function */
+
+#define ARCH_SETUP machine_specific_arch_setup();
+
+static void __init machine_specific_arch_setup(void);
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/smpboot_hooks.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/smpboot_hooks.h
new file mode 100644
index 0000000000..28adeaf244
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/smpboot_hooks.h
@@ -0,0 +1,55 @@
+/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
+ * which needs to alter them. */
+
+static inline void smpboot_clear_io_apic_irqs(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+ io_apic_irqs = 0;
+#endif
+}
+
+static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+{
+#if 1
+ printk("smpboot_setup_warm_reset_vector\n");
+#else
+ CMOS_WRITE(0xa, 0xf);
+ local_flush_tlb();
+ Dprintk("1.\n");
+ *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
+ Dprintk("2.\n");
+ *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
+ Dprintk("3.\n");
+#endif
+}
+
+static inline void smpboot_restore_warm_reset_vector(void)
+{
+ /*
+ * Install writable page 0 entry to set BIOS data area.
+ */
+ local_flush_tlb();
+
+ /*
+ * Paranoid: Set warm reset code and vector here back
+ * to default values.
+ */
+ CMOS_WRITE(0, 0xf);
+
+ *((volatile long *) phys_to_virt(0x467)) = 0;
+}
+
+static inline void smpboot_setup_io_apic(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+ /*
+ * Here we can be sure that there is an IO-APIC in the system. Let's
+ * go and set it up:
+ */
+ if (!skip_ioapic_setup && nr_ioapics)
+ setup_IO_APIC();
+#endif
+}
+
+
+#define smp_found_config (HYPERVISOR_shared_info->n_vcpu > 1)
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h
new file mode 100644
index 0000000000..4e487a06d6
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h
@@ -0,0 +1,76 @@
+#ifndef __X86_64_MMU_CONTEXT_H
+#define __X86_64_MMU_CONTEXT_H
+
+#include <linux/config.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/pda.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * possibly do the LDT unload here?
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
+
+#ifdef CONFIG_SMP
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ if (read_pda(mmu_state) == TLBSTATE_OK)
+ write_pda(mmu_state, TLBSTATE_LAZY);
+}
+#else
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+#endif
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned cpu = smp_processor_id();
+ if (likely(prev != next)) {
+ /* stop flush ipis for the previous mm */
+ clear_bit(cpu, &prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+ write_pda(mmu_state, TLBSTATE_OK);
+ write_pda(active_mm, next);
+#endif
+ set_bit(cpu, &next->cpu_vm_mask);
+ load_cr3(next->pgd);
+ xen_new_user_pt(__pa(__user_pgd(next->pgd)));
+ if (unlikely(next->context.ldt != prev->context.ldt))
+ load_LDT_nolock(&next->context, cpu);
+ }
+#ifdef CONFIG_SMP
+ else {
+ write_pda(mmu_state, TLBSTATE_OK);
+ if (read_pda(active_mm) != next)
+ out_of_line_bug();
+ if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+ /* We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
+ load_cr3(next->pgd);
+ xen_new_user_pt(__pa(__user_pgd(next->pgd)));
+ load_LDT_nolock(&next->context, cpu);
+ }
+ }
+#endif
+}
+
+#define deactivate_mm(tsk,mm) do { \
+ load_gs_index(0); \
+ asm volatile("movl %0,%%fs"::"r"(0)); \
+} while(0)
+
+#define activate_mm(prev, next) do { \
+ switch_mm((prev),(next),NULL); \
+} while (0)
+
+#endif
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/page.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/page.h
new file mode 100644
index 0000000000..8acd7990dc
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/page.h
@@ -0,0 +1,229 @@
+#ifndef _X86_64_PAGE_H
+#define _X86_64_PAGE_H
+
+#include <linux/config.h>
+/* #include <linux/string.h> */
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+#include <asm-xen/xen-public/xen.h>
+#include <asm-xen/foreign_page.h>
+
+#define arch_free_page(_page,_order) \
+({ int foreign = PageForeign(_page); \
+ if (foreign) \
+ (PageForeignDestructor(_page))(_page); \
+ foreign; \
+})
+#define HAVE_ARCH_FREE_PAGE
+
+#ifdef CONFIG_XEN_SCRUB_PAGES
+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+#else
+#define scrub_pages(_p,_n) ((void)0)
+#endif
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (0x1 << PAGE_SHIFT)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))
+
+#define THREAD_ORDER 1
+#ifdef __ASSEMBLY__
+#define THREAD_SIZE (1 << (PAGE_SHIFT + THREAD_ORDER))
+#else
+#define THREAD_SIZE (1UL << (PAGE_SHIFT + THREAD_ORDER))
+#endif
+#define CURRENT_MASK (~(THREAD_SIZE-1))
+
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+void clear_page(void *);
+void copy_page(void *, void *);
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+extern u32 *phys_to_machine_mapping;
+#define pfn_to_mfn(_pfn) ((unsigned long) phys_to_machine_mapping[(unsigned int)(_pfn)])
+#define mfn_to_pfn(_mfn) ((unsigned long) machine_to_phys_mapping[(unsigned int)(_mfn)])
+static inline unsigned long phys_to_machine(unsigned long phys)
+{
+ unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+ return machine;
+}
+
+static inline unsigned long machine_to_phys(unsigned long machine)
+{
+ unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+ return phys;
+}
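
The two conversions are exact inverses: page-offset bits pass through untouched while the frame number goes through the pfn/mfn tables. A round-trip sketch (assumes BUG_ON is in scope):

    static inline void example_round_trip(unsigned long phys)
    {
        unsigned long mach = phys_to_machine(phys);  /* via pfn_to_mfn() */
        BUG_ON(machine_to_phys(mach) != phys);       /* mfn_to_pfn() undoes it */
    }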
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define PTE_MASK PHYSICAL_PAGE_MASK
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) (((x).pte & 1) ? machine_to_phys((x).pte) : \
+ (x).pte)
+#define pte_val_ma(x) ((x).pte)
+
+static inline unsigned long pmd_val(pmd_t x)
+{
+ unsigned long ret = x.pmd;
+ if (ret) ret = machine_to_phys(ret);
+ return ret;
+}
+
+static inline unsigned long pud_val(pud_t x)
+{
+ unsigned long ret = x.pud;
+ if (ret) ret = machine_to_phys(ret);
+ return ret;
+}
+
+static inline unsigned long pgd_val(pgd_t x)
+{
+ unsigned long ret = x.pgd;
+ if (ret) ret = machine_to_phys(ret);
+ return ret;
+}
+
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte_ma(x) ((pte_t) { (x) } )
+
+static inline pte_t __pte(unsigned long x)
+{
+ if (x & 1) x = phys_to_machine(x);
+ return ((pte_t) { (x) });
+}
+
+static inline pmd_t __pmd(unsigned long x)
+{
+ if ((x & 1)) x = phys_to_machine(x);
+ return ((pmd_t) { (x) });
+}
+
+static inline pud_t __pud(unsigned long x)
+{
+ if ((x & 1)) x = phys_to_machine(x);
+ return ((pud_t) { (x) });
+}
+
+static inline pgd_t __pgd(unsigned long x)
+{
+ if ((x & 1)) x = phys_to_machine(x);
+ return ((pgd_t) { (x) });
+}
+
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+extern unsigned long vm_stack_flags, vm_stack_flags32;
+extern unsigned long vm_data_default_flags, vm_data_default_flags32;
+extern unsigned long vm_force_exec32;
+
+#define __START_KERNEL 0xffffffff80100000UL
+#define __START_KERNEL_map 0xffffffff80000000UL
+#define __PAGE_OFFSET 0xffff880000000000UL
+
+#else
+#define __START_KERNEL 0xffffffff80100000
+#define __START_KERNEL_map 0xffffffff80000000
+#define __PAGE_OFFSET 0xffff880000000000
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* See Documentation/x86_64/mm.txt for a description of the memory map. */
+#define __PHYSICAL_MASK_SHIFT 46
+#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
+#define __VIRTUAL_MASK_SHIFT 48
+#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+
+#define KERNEL_TEXT_SIZE (40UL*1024*1024)
+#define KERNEL_TEXT_START 0xffffffff80000000UL
+
+#ifndef __ASSEMBLY__
+
+#include <asm/bug.h>
+
+/* Pure 2^n version of get_order */
+extern __inline__ int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+
+/* Note: __pa(&symbol_visible_to_c) should always be replaced with __pa_symbol.
+   Otherwise you risk miscompilation. */
+#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
+/* __pa_symbol should be used for C-visible symbols.
+   This seems to be the official gcc-blessed way to do such arithmetic. */
+#define __pa_symbol(x) \
+ ({unsigned long v; \
+ asm("" : "=r" (v) : "0" (x)); \
+ __pa(v); })
+
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#ifndef CONFIG_DISCONTIGMEM
+#define pfn_to_page(pfn) (mem_map + (pfn))
+#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
+#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define __HAVE_ARCH_GATE_AREA 1
+
+#endif /* __KERNEL__ */
+
+#endif /* _X86_64_PAGE_H */
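
The pseudophysical-to-machine macros above are a straight table lookup on the frame number, with the in-page offset carried through unchanged. A minimal userspace sketch of the same arithmetic, using a hypothetical four-entry p2m table (the real phys_to_machine_mapping is supplied by Xen at boot):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Hypothetical p2m table: pseudophysical frame -> machine frame. */
    static const unsigned long p2m[4] = { 0x1a0, 0x07c, 0x233, 0x005 };

    static unsigned long phys_to_machine(unsigned long phys)
    {
        unsigned long machine = p2m[phys >> PAGE_SHIFT];
        /* Swap the frame number, keep the offset within the page. */
        return (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
    }

    int main(void)
    {
        unsigned long phys = (2UL << PAGE_SHIFT) | 0x123; /* frame 2, offset 0x123 */
        printf("machine = %#lx\n", phys_to_machine(phys)); /* prints 0x233123 */
        return 0;
    }
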
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/param.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/param.h
new file mode 100644
index 0000000000..5145e63610
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/param.h
@@ -0,0 +1,22 @@
+#ifndef _ASMx86_64_PARAM_H
+#define _ASMx86_64_PARAM_H
+
+#ifdef __KERNEL__
+# define HZ 100 /* Internal kernel timer frequency */
+# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
+# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
+#endif
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pci.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pci.h
new file mode 100644
index 0000000000..039046fdc8
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pci.h
@@ -0,0 +1,148 @@
+#ifndef __x8664_PCI_H
+#define __x8664_PCI_H
+
+#include <linux/config.h>
+#include <asm/io.h>
+
+#ifdef __KERNEL__
+
+#include <linux/mm.h> /* for struct page */
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+
+#ifdef CONFIG_PCI
+extern unsigned int pcibios_assign_all_busses(void);
+#else
+#define pcibios_assign_all_busses() 0
+#endif
+#define pcibios_scan_all_fns(a, b) 0
+
+extern int no_iommu, force_iommu;
+
+extern unsigned long pci_mem_start;
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM (pci_mem_start)
+
+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
+
+void pcibios_config_init(void);
+struct pci_bus * pcibios_scan_root(int bus);
+extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
+extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
+
+void pcibios_set_master(struct pci_dev *dev);
+void pcibios_penalize_isa_irq(int irq);
+struct irq_routing_table *pcibios_get_irq_routing_table(void);
+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/page.h>
+
+extern int iommu_setup(char *opt);
+
+#ifdef CONFIG_GART_IOMMU
+/* The PCI address space does equal the physical memory
+ * address space. The networking and block device layers use
+ * this boolean for bounce buffer decisions.
+ *
+ * On AMD64 the two mostly do coincide, but we set this to zero to tell
+ * some subsystems that an IOMMU is available.
+ */
+#define PCI_DMA_BUS_IS_PHYS (no_iommu ? 1 : 0)
+
+/*
+ * x86-64 always supports DAC, but sometimes it is useful to force
+ * devices through the IOMMU to get automatic sg list merging.
+ * Optional right now.
+ */
+extern int iommu_sac_force;
+#define pci_dac_dma_supported(pci_dev, mask) (!iommu_sac_force)
+
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+ dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+ __u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+ ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+ ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ (((PTR)->LEN_NAME) = (VAL))
+
+#else
+/* No IOMMU */
+
+#define PCI_DMA_BUS_IS_PHYS 1
+#define pci_dac_dma_supported(pci_dev, mask) 1
+
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
+#endif
+
+#include <asm-generic/pci-dma-compat.h>
+
+static inline dma64_addr_t
+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
+{
+ return ((dma64_addr_t) page_to_phys(page) +
+ (dma64_addr_t) offset);
+}
+
+static inline struct page *
+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ return virt_to_page(__va(dma_addr));
+}
+
+static inline unsigned long
+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ return (dma_addr & ~PAGE_MASK);
+}
+
+static inline void
+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+}
+
+static inline void
+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+ flush_write_buffers();
+}
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+#endif /* __KERNEL__ */
+
+/* generic pci stuff */
+#ifdef CONFIG_PCI
+#include <asm-generic/pci.h>
+#endif
+
+/* On Xen we have to scan all functions since Xen hides bridges from
+ * us. If a bridge is at fn=0 and that slot has a multifunction
+ * device, we won't find the additional devices without scanning all
+ * functions. */
+#undef pcibios_scan_all_fns
+#define pcibios_scan_all_fns(a, b) 1
+
+#endif /* __x8664_PCI_H */
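
The #undef/#define at the end disables the generic probe loop's shortcut: without a trustworthy multifunction bit, every slot has to be walked fn 0..7. A schematic of the difference, with a hypothetical header_is_multifunction() standing in for the config-space header read:

    #include <stdio.h>

    /* Hypothetical stand-in for reading the multifunction bit from the
     * config-space header of (slot, fn 0); on Xen the bridge that would
     * report it may be hidden, so the bit cannot be trusted. */
    static int header_is_multifunction(int slot) { (void)slot; return 0; }

    static void scan_slot(int slot, int scan_all_fns)
    {
        int last_fn = (scan_all_fns || header_is_multifunction(slot)) ? 7 : 0;
        for (int fn = 0; fn <= last_fn; fn++)
            printf("probe %02x.%x\n", slot, fn);
    }

    int main(void)
    {
        scan_slot(3, 0); /* native: stops after fn 0 when the MF bit is clear */
        scan_slot(3, 1); /* Xen: pcibios_scan_all_fns() forces fn 0..7 */
        return 0;
    }
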
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pda.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pda.h
new file mode 100644
index 0000000000..76dac1c98b
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pda.h
@@ -0,0 +1,85 @@
+#ifndef X86_64_PDA_H
+#define X86_64_PDA_H
+
+#ifndef __ASSEMBLY__
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/cache.h>
+
+/* Per processor datastructure. %gs points to it while the kernel runs */
+struct x8664_pda {
+ struct task_struct *pcurrent; /* Current process */
+ unsigned long data_offset; /* Per cpu data offset from linker address */
+ struct x8664_pda *me; /* Pointer to itself */
+ unsigned long kernelstack; /* top of kernel stack for current */
+ unsigned long oldrsp; /* user rsp for system call */
+ unsigned long irqrsp; /* Old rsp for interrupts. */
+ int irqcount; /* Irq nesting counter. Starts with -1 */
+ int cpunumber; /* Logical CPU number */
+ char *irqstackptr; /* top of irqstack */
+ unsigned int __softirq_pending;
+ unsigned int __nmi_count; /* number of NMI on this CPUs */
+ unsigned long idle_timestamp;
+ struct mm_struct *active_mm;
+ int mmu_state;
+ unsigned apic_timer_irqs;
+ int kernel_mode; /* kernel or user mode */
+} ____cacheline_aligned;
+
+
+#define IRQSTACK_ORDER 2
+#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
+
+extern struct x8664_pda cpu_pda[];
+
+/*
+ * There is no fast way to get the base address of the PDA; all accesses
+ * have to go through %fs/%gs. So it needs to be done this Torvaldian way.
+ */
+#define sizeof_field(type,field) (sizeof(((type *)0)->field))
+#define typeof_field(type,field) typeof(((type *)0)->field)
+
+extern void __bad_pda_field(void);
+
+#define pda_offset(field) offsetof(struct x8664_pda, field)
+
+#define pda_to_op(op,field,val) do { \
+ switch (sizeof_field(struct x8664_pda, field)) { \
+case 2: \
+asm volatile(op "w %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+case 4: \
+asm volatile(op "l %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+case 8: \
+asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+ default: __bad_pda_field(); \
+ } \
+ } while (0)
+
+/*
+ * AK: PDA read accesses should neither be volatile nor have a memory clobber.
+ * Unfortunately removing them causes all hell to break loose currently.
+ */
+#define pda_from_op(op,field) ({ \
+ typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
+ switch (sizeof_field(struct x8664_pda, field)) { \
+case 2: \
+asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
+case 4: \
+asm volatile(op "l %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
+case 8: \
+asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
+ default: __bad_pda_field(); \
+ } \
+ ret__; })
+
+
+#define read_pda(field) pda_from_op("mov",field)
+#define write_pda(field,val) pda_to_op("mov",field,val)
+#define add_pda(field,val) pda_to_op("add",field,val)
+#define sub_pda(field,val) pda_to_op("sub",field,val)
+
+#endif
+
+#define PDA_STACKOFFSET (5*8)
+
+#endif
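
The pda_to_op()/pda_from_op() machinery picks the w/l/q instruction suffix from the field size and pastes the field offset into the %gs displacement. The size/offset half of that can be checked in plain userspace; a sketch with the struct trimmed to a few fields for illustration:

    #include <stdio.h>
    #include <stddef.h>

    /* Trimmed copy of the PDA, for illustration only. */
    struct pda_sketch {
        void *pcurrent;
        unsigned long kernelstack;
        int cpunumber;
    };

    #define sizeof_field(type, field) (sizeof(((type *)0)->field))
    #define pda_offset(field)         offsetof(struct pda_sketch, field)

    int main(void)
    {
        /* Size 4 selects the "l" suffix in pda_from_op(); the offset is
         * what "%P1" pastes into the %gs-relative addressing mode. */
        printf("cpunumber: size %zu, offset %zu\n",
               sizeof_field(struct pda_sketch, cpunumber),
               pda_offset(cpunumber));
        return 0;
    }
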
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgalloc.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgalloc.h
new file mode 100644
index 0000000000..325d700c3b
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgalloc.h
@@ -0,0 +1,171 @@
+#ifndef _X86_64_PGALLOC_H
+#define _X86_64_PGALLOC_H
+
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <asm/pda.h>
+#include <linux/threads.h>
+#include <linux/mm.h>
+#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
+
+void make_page_readonly(void *va);
+void make_page_writable(void *va);
+void make_pages_readonly(void *va, unsigned int nr);
+void make_pages_writable(void *va, unsigned int nr);
+
+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
+{
+ set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+}
+
+/*
+ * We need to use the batch mode here, but pgd_populate() won't
+ * be called frequently.
+ */
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
+ set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
+}
+
+extern __inline__ pmd_t *get_pmd(void)
+{
+ pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+ if (!pmd)
+ return NULL;
+ make_page_readonly(pmd);
+ xen_pmd_pin(__pa(pmd));
+ return pmd;
+}
+
+extern __inline__ void pmd_free(pmd_t *pmd)
+{
+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ xen_pmd_unpin(__pa(pmd));
+ make_page_writable(pmd);
+ free_page((unsigned long)pmd);
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ pmd_t *pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!pmd)
+ return NULL;
+ make_page_readonly(pmd);
+ xen_pmd_pin(__pa(pmd));
+ return pmd;
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ pud_t *pud = (pud_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!pud)
+ return NULL;
+ make_page_readonly(pud);
+ xen_pud_pin(__pa(pud));
+ return pud;
+}
+
+static inline void pud_free(pud_t *pud)
+{
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ xen_pud_unpin(__pa(pud));
+ make_page_writable(pud);
+ free_page((unsigned long)pud);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ /*
+ * We allocate two contiguous pages for kernel and user.
+ */
+ unsigned boundary;
+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
+
+ if (!pgd)
+ return NULL;
+ /*
+ * Copy kernel pointers in from init.
+ * Could keep a freelist or slab cache of those because the kernel
+ * part never changes.
+ */
+ boundary = pgd_index(__PAGE_OFFSET);
+ memset(pgd, 0, boundary * sizeof(pgd_t));
+ memcpy(pgd + boundary,
+ init_level4_pgt + boundary,
+ (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
+
+ memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
+ make_pages_readonly(pgd, 2);
+
+ xen_pgd_pin(__pa(pgd)); /* kernel */
+ xen_pgd_pin(__pa(__user_pgd(pgd))); /* user */
+ /*
+ * Set level3_user_pgt for vsyscall area
+ */
+ set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
+ mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
+ return pgd;
+}
+
+static inline void pgd_free(pgd_t *pgd)
+{
+ BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+ xen_pgd_unpin(__pa(pgd));
+ xen_pgd_unpin(__pa(__user_pgd(pgd)));
+ make_pages_writable(pgd, 2);
+ free_pages((unsigned long)pgd, 1);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!pte)
+ return NULL;
+ make_page_readonly(pte);
+ xen_pte_pin(__pa(pte));
+ return pte;
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!pte)
+ return NULL;
+ make_page_readonly(pte);
+ xen_pte_pin(__pa(pte));
+ return virt_to_page((unsigned long)pte);
+}
+
+/* Should really implement gc for free page table pages. This could be
+ done with a reference count in struct page. */
+
+extern __inline__ void pte_free_kernel(pte_t *pte)
+{
+ BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
+ xen_pte_unpin(__pa(pte));
+ make_page_writable(pte);
+ free_page((unsigned long)pte);
+}
+
+extern void pte_free(struct page *pte);
+
+//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+
+#define __pte_free_tlb(tlb,x) pte_free((x))
+#define __pmd_free_tlb(tlb,x) pmd_free((x))
+#define __pud_free_tlb(tlb,x) pud_free((x))
+
+#endif /* _X86_64_PGALLOC_H */
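
Every allocator above follows the same lifecycle: get a zeroed page, write-protect it in the guest, then pin it so Xen validates and type-locks it as a page table; the free path reverses the steps. A sketch of that ordering, with stubs standing in for the Xen and page-allocator calls:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stubs standing in for the guest-side and hypervisor-side calls. */
    static void make_page_readonly(void *va)    { printf("r/o    %p\n", va); }
    static void make_page_writable(void *va)    { printf("r/w    %p\n", va); }
    static void xen_pte_pin(unsigned long pa)   { printf("pin    %#lx\n", pa); }
    static void xen_pte_unpin(unsigned long pa) { printf("unpin  %#lx\n", pa); }

    int main(void)
    {
        void *pte = calloc(1, 4096);          /* get_zeroed_page() stand-in */
        make_page_readonly(pte);              /* guest may no longer write it */
        xen_pte_pin((unsigned long)pte);      /* Xen validates and type-locks it */
        /* ... page is live as a page table ... */
        xen_pte_unpin((unsigned long)pte);
        make_page_writable(pte);
        free(pte);                            /* free_page() stand-in */
        return 0;
    }
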
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
new file mode 100644
index 0000000000..5010b3d2c6
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
@@ -0,0 +1,527 @@
+#ifndef _X86_64_PGTABLE_H
+#define _X86_64_PGTABLE_H
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the x86-64 page table tree.
+ *
+ * x86-64 has a 4 level table setup. Generic linux MM only supports
+ * three levels. The fourth level is currently a single static page that
+ * is shared by everybody and just contains a pointer to the current
+ * three level page setup on the beginning and some kernel mappings at
+ * the end. For more details see Documentation/x86_64/mm.txt
+ */
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <asm/bitops.h>
+#include <linux/threads.h>
+#include <asm/pda.h>
+#include <asm-xen/hypervisor.h>
+extern pud_t level3_user_pgt[512];
+extern pud_t init_level4_pgt[];
+extern pud_t init_level4_user_pgt[];
+extern unsigned long __supported_pte_mask;
+
+#define swapper_pg_dir NULL
+
+extern int nonx_setup(char *str);
+extern void paging_init(void);
+extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
+
+extern unsigned long pgkern_mask;
+
+#define arbitrary_virt_to_machine(__va) ({0;})
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#define PGDIR_SHIFT 39
+#define PTRS_PER_PGD 512
+
+/*
+ * PUD_SHIFT determines the size of the area a top-level page table entry can map
+ */
+#define PUD_SHIFT 30
+#define PTRS_PER_PUD 512
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT 21
+#define PTRS_PER_PMD 512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE 512
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
+
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+
+#define pgd_none(x) (!pgd_val(x))
+#define pud_none(x) (!pud_val(x))
+
+#define set_pte_batched(pteptr, pteval) \
+ queue_l1_entry_update(pteptr, (pteval))
+
+extern inline int pud_present(pud_t pud) { return !pud_none(pud); }
+
+#ifdef CONFIG_SMP
+#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte)
+
+#else
+#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval.pte))
+#if 0
+static inline void set_pte(pte_t *dst, pte_t val)
+{
+ *dst = val;
+}
+#endif
+#endif
+
+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
+#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
+#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
+
+extern inline void pud_clear (pud_t * pud)
+{
+ set_pud(pud, __pud(0));
+}
+
+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
+
+extern inline void pgd_clear (pgd_t * pgd)
+{
+ set_pgd(pgd, __pgd(0));
+ set_pgd(__user_pgd(pgd), __pgd(0));
+}
+
+#define pud_page(pud) \
+ ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
+
+/*
+ * A note on implementation of this atomic 'get-and-clear' operation.
+ * This is actually very simple because Xen Linux can only run on a single
+ * processor. Therefore, we cannot race other processors setting the 'accessed'
+ * or 'dirty' bits on a page-table entry.
+ * Even if pages are shared between domains, that is not a problem because
+ * each domain will have separate page tables, with their own versions of
+ * accessed & dirty state.
+ */
+static inline pte_t ptep_get_and_clear(pte_t *xp)
+{
+ pte_t pte = *xp;
+ if (pte.pte)
+ set_pte(xp, __pte_ma(0));
+ return pte;
+}
+
+#define pte_same(a, b) ((a).pte == (b).pte)
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR 0
+
+#ifndef __ASSEMBLY__
+#define MAXMEM 0x3fffffffffffUL
+#define VMALLOC_START 0xffffc20000000000UL
+#define VMALLOC_END 0xffffe1ffffffffffUL
+#define MODULES_VADDR 0xffffffff88000000UL
+#define MODULES_END 0xfffffffffff00000UL
+#define MODULES_LEN (MODULES_END - MODULES_VADDR)
+
+#define _PAGE_BIT_PRESENT 0
+#define _PAGE_BIT_RW 1
+#define _PAGE_BIT_USER 2
+#define _PAGE_BIT_PWT 3
+#define _PAGE_BIT_PCD 4
+#define _PAGE_BIT_ACCESSED 5
+#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+#define _PAGE_PRESENT 0x001
+#define _PAGE_RW 0x002
+#define _PAGE_USER 0x004
+#define _PAGE_PWT 0x008
+#define _PAGE_PCD 0x010
+#define _PAGE_ACCESSED 0x020
+#define _PAGE_DIRTY 0x040
+#define _PAGE_PSE 0x080 /* 2MB page */
+#define _PAGE_FILE 0x040 /* set:pagecache, unset:swap */
+#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
+
+#define _PAGE_PROTNONE 0x080 /* If not present */
+#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE _PAGE_TABLE
+
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY PAGE_COPY_NOEXEC
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define __PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
+#define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_USER )
+#define __PAGE_KERNEL_NOCACHE \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
+#define __PAGE_KERNEL_RO \
+ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
+#define __PAGE_KERNEL_VSYSCALL \
+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_USER )
+#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD | _PAGE_USER )
+#define __PAGE_KERNEL_LARGE \
+ (__PAGE_KERNEL | _PAGE_PSE | _PAGE_USER )
+
+
+/*
+ * We don't support GLOBAL pages in xenolinux64
+ */
+#define MAKE_GLOBAL(x) __pgprot((x))
+
+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
+#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+
+/* xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+static inline unsigned long pgd_bad(pgd_t pgd)
+{
+ unsigned long val = pgd_val(pgd);
+ val &= ~PTE_MASK;
+ val &= ~(_PAGE_USER | _PAGE_DIRTY);
+ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
+}
+
+static inline unsigned long pud_bad(pud_t pud)
+{
+ unsigned long val = pud_val(pud);
+ val &= ~PTE_MASK;
+ val &= ~(_PAGE_USER | _PAGE_DIRTY);
+ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
+}
+
+#define pte_none(x) (!(x).pte)
+#define pte_present(x) ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * We detect special mappings in one of two ways:
+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
+ * to be outside our maximum possible pseudophys range.
+ * 2. If the MFN belongs to a different domain then we will certainly
+ * not have MFN in our p2m table. Conversely, if the page is ours,
+ * then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the high bit gets shifted out
+ * (e.g., phys_to_machine()) so behaviour there is correct.
+ */
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
+#define pte_pfn(_pte) \
+({ \
+ unsigned long mfn = (_pte).pte >> PAGE_SHIFT; \
+ unsigned pfn = mfn_to_pfn(mfn); \
+ if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
+ pfn = max_mapnr; /* special: force !pfn_valid() */ \
+ pfn; \
+})
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+ pte_t pte;
+
+ (pte).pte = (pfn_to_mfn(page_nr) << PAGE_SHIFT);
+ (pte).pte |= pgprot_val(pgprot);
+ (pte).pte &= __supported_pte_mask;
+ return pte;
+}
+
+#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+#define __pte_val(x) ((x).pte)
+
+static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
+extern inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
+extern inline int pte_exec(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
+extern inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
+extern inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
+extern inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
+static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; }
+
+extern inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
+extern inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
+extern inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
+extern inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
+extern inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; }
+
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int ret = pte_dirty(pte);
+ if (ret)
+ xen_l1_entry_update(ptep, pte_mkclean(pte).pte);
+ return ret;
+}
+
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int ret = pte_young(pte);
+ if (ret)
+ xen_l1_entry_update(ptep, pte_mkold(pte).pte);
+ return ret;
+}
+
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (pte_write(pte))
+ set_pte(ptep, pte_wrprotect(pte));
+}
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_dirty(pte))
+ xen_l1_entry_update(ptep, pte_mkdirty(pte).pte);
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
+
+#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
+static inline int pmd_large(pmd_t pte) {
+ return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
+}
+
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+/*
+ * Level 4 access.
+ * Never use these in the common code.
+ */
+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+#define pgd_offset_k(address) (pgd_t *)(init_level4_pgt + pgd_index(address))
+#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
+#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
+
+/* PUD - Level3 access */
+/* to find an entry in a page-table-directory. */
+#define pud_index(address) ((address >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
+static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
+{
+ return pud + pud_index(address);
+}
+
+/* Find the correct pud via the hidden fourth page-table level: */
+
+/* This accesses the reference page table of the boot cpu.
+ Other CPUs get synced lazily via the page fault handler. */
+static inline pud_t *pud_offset_k(unsigned long address)
+{
+ unsigned long addr;
+
+ addr = pud_val(init_level4_pgt[pud_index(address)]);
+ addr &= PHYSICAL_PAGE_MASK; /* machine physical */
+ addr = machine_to_phys(addr);
+ return __pud_offset_k((pud_t *)__va(addr), address);
+}
+
+/* PMD - Level 2 access */
+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
+ pmd_index(address))
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+#define pmd_bad(x) ((pmd_val(x) & ~PTE_MASK) != _KERNPG_TABLE )
+#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pmd_pfn(x) ((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
+
+#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
+#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
+
+/* PTE - Level 1 access. */
+
+/* page, protection -> pte */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
+
+/* physical address -> PTE */
+static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
+{
+ pte_t pte;
+ (pte).pte = physpage | pgprot_val(pgprot);
+ return pte;
+}
+
+/* Change flags of a PTE */
+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ (pte).pte &= _PAGE_CHG_MASK;
+ (pte).pte |= pgprot_val(newprot);
+ (pte).pte &= __supported_pte_mask;
+ return pte;
+}
+
+#define pte_index(address) \
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
+ pte_index(address))
+
+/* x86-64 always has all page tables mapped. */
+#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
+#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
+#define pte_unmap(pte) /* NOP */
+#define pte_unmap_nested(pte) /* NOP */
+
+#define update_mmu_cache(vma,address,pte) do { } while (0)
+
+/* We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPUs that might be updating the dirty
+ * bit at the same time. */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ do { \
+ if (__dirty) { \
+ set_pte(__ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } \
+ } while (0)
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x) (((x).val >> 1) & 0x3f)
+#define __swp_offset(x) ((x).val >> 8)
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#endif /* !__ASSEMBLY__ */
+
+extern int kern_addr_valid(unsigned long addr);
+
+#define DOMID_LOCAL (0xFFFFU)
+
+int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid);
+int __direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t *v);
+
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+ remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#define pgtable_cache_init() do { } while (0)
+#define check_pgt_cache() do { } while (0)
+
+#define PAGE_AGP PAGE_KERNEL_NOCACHE
+#define HAVE_PAGE_AGP 1
+
+/* fs/proc/kcore.c */
+#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
+#define kc_offset_to_vaddr(o) \
+ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+#endif /* _X86_64_PGTABLE_H */
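
The ownership test that pte_pfn() performs can be exercised in isolation: a frame is ours only if p2m(m2p(MFN)) == MFN, and anything else is forced to fail pfn_valid(). A userspace sketch with toy p2m/m2p tables, with INVALID_P2M_ENTRY poked in for unowned frames as the comment above requires:

    #include <stdio.h>

    #define INVALID_P2M_ENTRY (~0UL)
    #define MAX_MAPNR 4UL

    /* Toy translation tables for illustration. */
    static unsigned long p2m[MAX_MAPNR] = { 9, 7, INVALID_P2M_ENTRY, 3 };
    static unsigned long m2p[16];

    static unsigned long mfn_to_pfn(unsigned long mfn) { return m2p[mfn]; }
    static unsigned long pfn_to_mfn(unsigned long pfn) { return p2m[pfn]; }

    /* The pte_pfn() test: the frame is ours only if p2m(m2p(mfn)) == mfn. */
    static unsigned long checked_pfn(unsigned long mfn)
    {
        unsigned long pfn = mfn_to_pfn(mfn);
        if (pfn >= MAX_MAPNR || pfn_to_mfn(pfn) != mfn)
            return MAX_MAPNR;                 /* force !pfn_valid() */
        return pfn;
    }

    int main(void)
    {
        for (unsigned long i = 0; i < 16; i++)
            m2p[i] = INVALID_P2M_ENTRY;
        m2p[9] = 0; m2p[7] = 1; m2p[3] = 3;

        printf("%lu\n", checked_pfn(7));      /* 1: our frame */
        printf("%lu\n", checked_pfn(5));      /* 4: foreign/IO, rejected */
        return 0;
    }
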
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/processor.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/processor.h
new file mode 100644
index 0000000000..e4a683206f
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/processor.h
@@ -0,0 +1,474 @@
+/*
+ * include/asm-x86_64/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_X86_64_PROCESSOR_H
+#define __ASM_X86_64_PROCESSOR_H
+
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/cpufeature.h>
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/msr.h>
+#include <asm/current.h>
+#include <asm/system.h>
+#include <asm/mmsegment.h>
+#include <asm/percpu.h>
+#include <linux/personality.h>
+
+#define TF_MASK 0x00000100
+#define IF_MASK 0x00000200
+#define IOPL_MASK 0x00003000
+#define NT_MASK 0x00004000
+#define VM_MASK 0x00020000
+#define AC_MASK 0x00040000
+#define VIF_MASK 0x00080000 /* virtual interrupt flag */
+#define VIP_MASK 0x00100000 /* virtual interrupt pending */
+#define ID_MASK 0x00200000
+
+#define desc_empty(desc) \
+ (!((desc)->a + (desc)->b))
+
+#define desc_equal(desc1, desc2) \
+ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
+
+/*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ */
+
+struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+ __u8 x86_mask;
+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
+ __u32 x86_capability[NCAPINTS];
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ int x86_cache_size; /* in KB */
+ int x86_clflush_size;
+ int x86_cache_alignment;
+	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined */
+ __u8 x86_virt_bits, x86_phys_bits;
+ __u8 x86_num_cores;
+ __u8 x86_apicid;
+ __u32 x86_power;
+ __u32 x86_cpuid_level; /* Max CPUID function supported */
+ unsigned long loops_per_jiffy;
+} ____cacheline_aligned;
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_RISE 6
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NUM 8
+#define X86_VENDOR_UNKNOWN 0xff
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_x86 cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+extern char ignore_irq13;
+
+extern void identify_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+extern void dodgy_tsc(void);
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x0008 /* enable debugging extensions */
+#define X86_CR4_PSE 0x0010 /* enable page size extensions */
+#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+#define X86_CR4_MCE 0x0040 /* Machine check enable */
+#define X86_CR4_PGE 0x0080 /* enable global pages */
+#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
+
+/*
+ * Save the cr4 feature set we're using (ie
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPU's that boot up
+ * after us can get the correct flags.
+ */
+extern unsigned long mmu_cr4_features;
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+ mmu_cr4_features |= mask;
+ switch (mask) {
+ case X86_CR4_OSFXSR:
+ case X86_CR4_OSXMMEXCPT:
+ break;
+ default:
+ do {
+ const char *msg = "Xen unsupported cr4 update\n";
+ (void)HYPERVISOR_console_io(
+ CONSOLEIO_write, __builtin_strlen(msg),
+ (char *)msg);
+ BUG();
+ } while (0);
+ }
+}
+
+#define load_cr3(pgdir) do { \
+ xen_pt_switch(__pa(pgdir)); \
+ per_cpu(cur_pgd, smp_processor_id()) = pgdir; \
+} while (/* CONSTCOND */0)
+
+/*
+ * Bus types
+ */
+#define MCA_bus 0
+#define MCA_bus__is_a_macro
+
+
+/*
+ * User space process size. 47bits.
+ */
+#define TASK_SIZE (0x800000000000UL)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
+#define TASK_UNMAPPED_32 PAGE_ALIGN(IA32_PAGE_OFFSET/3)
+#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3)
+#define TASK_UNMAPPED_BASE \
+ (test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
+
+/*
+ * Size of io_bitmap.
+ */
+#define IO_BITMAP_BITS 65536
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+
+struct i387_fxsave_struct {
+ u16 cwd;
+ u16 swd;
+ u16 twd;
+ u16 fop;
+ u64 rip;
+ u64 rdp;
+ u32 mxcsr;
+ u32 mxcsr_mask;
+ u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */
+ u32 padding[24];
+} __attribute__ ((aligned (16)));
+
+union i387_union {
+ struct i387_fxsave_struct fxsave;
+};
+
+struct tss_struct {
+ u32 reserved1;
+ u64 rsp0;
+ u64 rsp1;
+ u64 rsp2;
+ u64 reserved2;
+ u64 ist[7];
+ u32 reserved3;
+ u32 reserved4;
+ u16 reserved5;
+ u16 io_bitmap_base;
+ /*
+ * The extra 1 is there because the CPU will access an
+ * additional byte beyond the end of the IO permission
+ * bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit. Thus we have:
+ *
+ * 128 bytes, the bitmap itself, for ports 0..0x3ff
+ * 8 bytes, for an extra "long" of ~0UL
+ */
+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+} __attribute__((packed)) ____cacheline_aligned;
+
+extern struct cpuinfo_x86 boot_cpu_data;
+DECLARE_PER_CPU(struct tss_struct,init_tss);
+DECLARE_PER_CPU(pgd_t *, cur_pgd);
+
+#define ARCH_MIN_TASKALIGN 16
+
+struct thread_struct {
+ unsigned long rsp0;
+ unsigned long rsp;
+ unsigned long userrsp; /* Copy from PDA */
+ unsigned long fs;
+ unsigned long gs;
+ unsigned int io_pl;
+ unsigned short es, ds, fsindex, gsindex;
+/* Hardware debugging registers */
+ unsigned long debugreg0;
+ unsigned long debugreg1;
+ unsigned long debugreg2;
+ unsigned long debugreg3;
+ unsigned long debugreg6;
+ unsigned long debugreg7;
+/* fault info */
+ unsigned long cr2, trap_no, error_code;
+/* floating point info */
+ union i387_union i387 __attribute__((aligned(16)));
+/* IO permissions. The bitmap could be moved into the GDT, which would make
+   the switch faster for a limited number of ioperm-using tasks. -AK */
+ int ioperm;
+ unsigned long *io_bitmap_ptr;
+ unsigned io_bitmap_max;
+/* cached TLS descriptors. */
+ u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
+} __attribute__((aligned(16)));
+
+#define INIT_THREAD {}
+
+#define INIT_MMAP \
+{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
+
+#define STACKFAULT_STACK 1
+#define DOUBLEFAULT_STACK 2
+#define NMI_STACK 3
+#define DEBUG_STACK 4
+#define MCE_STACK 5
+#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+#define EXCEPTION_STACK_ORDER 0
+
+#define start_thread(regs,new_rip,new_rsp) do { \
+ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
+ load_gs_index(0); \
+ (regs)->rip = (new_rip); \
+ (regs)->rsp = (new_rsp); \
+ write_pda(oldrsp, (new_rsp)); \
+ (regs)->cs = __USER_CS; \
+ (regs)->ss = __USER_DS; \
+ (regs)->eflags = 0x200; \
+ set_fs(USER_DS); \
+} while(0)
+
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/*
+ * Return saved PC of a blocked thread.
+ * What is this good for? it will be always the scheduler or ret_from_fork.
+ */
+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
+
+extern unsigned long get_wchan(struct task_struct *p);
+#define KSTK_EIP(tsk) \
+ (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
+#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
+
+
+struct microcode_header {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int reserved[3];
+};
+
+struct microcode {
+ struct microcode_header hdr;
+ unsigned int bits[0];
+};
+
+typedef struct microcode microcode_t;
+typedef struct microcode_header microcode_header_t;
+
+/* microcode format is extended from prescott processors */
+struct extended_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int cksum;
+};
+
+struct extended_sigtable {
+ unsigned int count;
+ unsigned int cksum;
+ unsigned int reserved[3];
+ struct extended_signature sigs[0];
+};
+
+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
+#define MICROCODE_IOCFREE _IO('6',0)
+
+
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
+
+/* Opteron nops */
+#define K8_NOP1 ".byte 0x90\n"
+#define K8_NOP2 ".byte 0x66,0x90\n"
+#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
+#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
+#define K8_NOP5 K8_NOP3 K8_NOP2
+#define K8_NOP6 K8_NOP3 K8_NOP3
+#define K8_NOP7 K8_NOP4 K8_NOP3
+#define K8_NOP8 K8_NOP4 K8_NOP4
+
+#define ASM_NOP_MAX 8
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+extern inline void rep_nop(void)
+{
+ __asm__ __volatile__("rep;nop": : :"memory");
+}
+
+/* Stop speculative execution */
+extern inline void sync_core(void)
+{
+ int tmp;
+ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
+}
+
+#define cpu_has_fpu 1
+
+#define ARCH_HAS_PREFETCH
+static inline void prefetch(void *x)
+{
+ asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+}
+
+#define ARCH_HAS_PREFETCHW 1
+static inline void prefetchw(void *x)
+{
+ alternative_input(ASM_NOP5,
+ "prefetchw (%1)",
+ X86_FEATURE_3DNOW,
+ "r" (x));
+}
+
+#define ARCH_HAS_SPINLOCK_PREFETCH 1
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#define cpu_relax() rep_nop()
+
+/*
+ * NSC/Cyrix CPU configuration register indexes
+ */
+#define CX86_CCR0 0xc0
+#define CX86_CCR1 0xc1
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_CCR6 0xea
+#define CX86_CCR7 0xeb
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+#define CX86_ARR_BASE 0xc4
+#define CX86_RCR_BASE 0xdc
+
+/*
+ * NSC/Cyrix CPU indexed register access macros
+ */
+
+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86(reg, data) do { \
+ outb((reg), 0x22); \
+ outb((data), 0x23); \
+} while (0)
+
+static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
+{
+ /* "monitor %eax,%ecx,%edx;" */
+ asm volatile(
+ ".byte 0x0f,0x01,0xc8;"
+ : :"a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+ /* "mwait %eax,%ecx;" */
+ asm volatile(
+ ".byte 0x0f,0x01,0xc9;"
+ : :"a" (eax), "c" (ecx));
+}
+
+#define stack_current() \
+({ \
+ struct thread_info *ti; \
+ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+ ti->task; \
+})
+
+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
+extern unsigned long boot_option_idle_override;
+/* Boot loader type from the setup header */
+extern int bootloader_type;
+
+#endif /* __ASM_X86_64_PROCESSOR_H */
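
rep_nop()/cpu_relax() are meant to be dropped into busy-wait loops, where the PAUSE hint de-pipelines the poll and frees execution resources for the sibling hyperthread. A hypothetical spin pattern, compilable on its own:

    #include <stdatomic.h>

    /* PAUSE de-pipelines the poll loop and yields resources to the
     * sibling hyperthread; semantically it is still just a nop. */
    static inline void cpu_relax(void)
    {
        __asm__ __volatile__("rep; nop" ::: "memory");
    }

    static atomic_int ready;

    void wait_for_ready(void)                 /* hypothetical caller */
    {
        while (!atomic_load_explicit(&ready, memory_order_acquire))
            cpu_relax();
    }
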
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/ptrace.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/ptrace.h
new file mode 100644
index 0000000000..2af8edd82f
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/ptrace.h
@@ -0,0 +1,119 @@
+#ifndef _X86_64_PTRACE_H
+#define _X86_64_PTRACE_H
+
+#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
+#define R15 0
+#define R14 8
+#define R13 16
+#define R12 24
+#define RBP 32
+#define RBX 40
+/* arguments: interrupts/non-tracing syscalls only save up to here */
+#define R11 48
+#define R10 56
+#define R9 64
+#define R8 72
+#define RAX 80
+#define RCX 88
+#define RDX 96
+#define RSI 104
+#define RDI 112
+#define ORIG_RAX 120 /* = ERROR */
+/* end of arguments */
+/* cpu exception frame or undefined in case of fast syscall. */
+#define RIP 128
+#define CS 136
+#define EFLAGS 144
+#define RSP 152
+#define SS 160
+#define ARGOFFSET R11
+#endif /* __ASSEMBLY__ */
+
+/* top of stack page */
+#define FRAME_SIZE 168
+
+#define PTRACE_OLDSETOPTIONS 21
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long rbp;
+ unsigned long rbx;
+/* arguments: non-interrupts/non-tracing syscalls only save up to here */
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long rax;
+ unsigned long rcx;
+ unsigned long rdx;
+ unsigned long rsi;
+ unsigned long rdi;
+ unsigned long orig_rax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+ unsigned long rip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long rsp;
+ unsigned long ss;
+/* top of stack page */
+};
+
+#endif
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_GETFPXREGS 18
+#define PTRACE_SETFPXREGS 19
+
+/* only useful for accessing 32-bit programs */
+#define PTRACE_GET_THREAD_AREA 25
+#define PTRACE_SET_THREAD_AREA 26
+
+#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#define user_mode(regs) (!!((regs)->cs & 3))
+#define instruction_pointer(regs) ((regs)->rip)
+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+
+enum {
+ EF_CF = 0x00000001,
+ EF_PF = 0x00000004,
+ EF_AF = 0x00000010,
+ EF_ZF = 0x00000040,
+ EF_SF = 0x00000080,
+ EF_TF = 0x00000100,
+ EF_IE = 0x00000200,
+ EF_DF = 0x00000400,
+ EF_OF = 0x00000800,
+ EF_IOPL = 0x00003000,
+ EF_IOPL_RING0 = 0x00000000,
+ EF_IOPL_RING1 = 0x00001000,
+ EF_IOPL_RING2 = 0x00002000,
+ EF_NT = 0x00004000, /* nested task */
+ EF_RF = 0x00010000, /* resume */
+ EF_VM = 0x00020000, /* virtual mode */
+ EF_AC = 0x00040000, /* alignment */
+ EF_VIF = 0x00080000, /* virtual interrupt */
+ EF_VIP = 0x00100000, /* virtual interrupt pending */
+ EF_ID = 0x00200000, /* id */
+};
+
+#endif
+
+#endif
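
user_mode() only tests the RPL bits of the saved CS: kernel selectors carry RPL 0 and user selectors RPL 3. A quick check against the selector values defined in segment.h below:

    #include <assert.h>

    struct pt_regs_sketch { unsigned long cs; };   /* trimmed for illustration */

    /* Kernel selectors carry RPL 0, user selectors RPL 3, so the low two
     * bits of the saved CS distinguish the two modes. */
    #define user_mode(regs) (!!((regs)->cs & 3))

    int main(void)
    {
        struct pt_regs_sketch k = { .cs = 0x10 }; /* __KERNEL_CS */
        struct pt_regs_sketch u = { .cs = 0x33 }; /* __USER_CS */
        assert(!user_mode(&k));
        assert(user_mode(&u));
        return 0;
    }
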
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/segment.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/segment.h
new file mode 100644
index 0000000000..db5926a999
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/segment.h
@@ -0,0 +1,47 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#include <asm/cache.h>
+
+#define __KERNEL_CS 0x10
+#define __KERNEL_DS 0x1b
+
+#define __KERNEL32_CS 0x3b
+
+/*
+ * We cannot use the same code segment descriptor for user and kernel
+ * -- not even in long flat mode, because of the different DPL. /kkeil
+ * The segment offset needs to contain an RPL. Grr. -AK
+ * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets)
+ */
+
+#define __USER32_CS 0x23 /* 4*8+3 */
+#define __USER_DS 0x2b /* 5*8+3 */
+#define __USER_CS 0x33 /* 6*8+3 */
+#define __USER32_DS __USER_DS
+#define __KERNEL16_CS (GDT_ENTRY_KERNELCS16 * 8)
+#define __KERNEL_COMPAT32_CS 0x8
+
+#define GDT_ENTRY_TLS 1
+#define GDT_ENTRY_TSS 8 /* needs two entries */
+#define GDT_ENTRY_LDT 10
+#define GDT_ENTRY_TLS_MIN 11
+#define GDT_ENTRY_TLS_MAX 13
+/* 14 free */
+#define GDT_ENTRY_KERNELCS16 15
+
+#define GDT_ENTRY_TLS_ENTRIES 3
+
+/* TLS indexes for 64bit - hardcoded in arch_prctl */
+#define FS_TLS 0
+#define GS_TLS 1
+
+#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
+#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
+
+#define IDT_ENTRIES 256
+#define GDT_ENTRIES 16
+#define GDT_SIZE (GDT_ENTRIES * 8)
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+#endif
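
A selector is (GDT index << 3) | table-indicator | RPL, which is where comments like "6*8+3" come from. A quick decode of the values above:

    #include <stdio.h>

    /* Decode a selector into GDT index, table indicator and RPL. */
    static void decode(const char *name, unsigned sel)
    {
        printf("%-12s %#06x: index %u, %s, RPL %u\n", name, sel,
               sel >> 3, (sel & 4) ? "LDT" : "GDT", sel & 3);
    }

    int main(void)
    {
        decode("__KERNEL_CS", 0x10);  /* index 2, RPL 0 */
        decode("__USER_DS",   0x2b);  /* index 5, RPL 3 */
        decode("__USER_CS",   0x33);  /* index 6, RPL 3 */
        return 0;
    }
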
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/smp.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/smp.h
new file mode 100644
index 0000000000..82b5cc2b9f
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/smp.h
@@ -0,0 +1,154 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifndef __ASSEMBLY__
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+extern int disable_apic;
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#ifndef __ASSEMBLY__
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
+#include <asm/io_apic.h>
+#endif
+#include <asm/apic.h>
+#include <asm/thread_info.h>
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+#ifndef ASSEMBLY
+
+#include <asm/pda.h>
+
+struct pt_regs;
+
+/*
+ * Private routines/data
+ */
+
+extern void smp_alloc_memory(void);
+extern cpumask_t cpu_online_map;
+extern volatile unsigned long smp_invalidate_needed;
+extern int pic_mode;
+extern int smp_num_siblings;
+extern void smp_flush_tlb(void);
+extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
+extern void smp_send_reschedule(int cpu);
+extern void smp_invalidate_rcv(void); /* Process an NMI */
+extern void (*mtrr_hook) (void);
+extern void zap_low_mappings(void);
+void smp_stop_cpu(void);
+extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern u8 phys_proc_id[NR_CPUS];
+
+#define SMP_TRAMPOLINE_BASE 0x6000
+
+/*
+ * On x86 all CPUs are mapped 1:1 to the APIC space.
+ * This simplifies scheduling and IPI sending and
+ * compresses data structures.
+ */
+
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_callin_map;
+#define cpu_possible_map cpu_callout_map
+
+static inline int num_booting_cpus(void)
+{
+ return cpus_weight(cpu_callout_map);
+}
+
+#define __smp_processor_id() read_pda(cpunumber)
+
+#ifdef CONFIG_X86_LOCAL_APIC
+extern __inline int hard_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
+}
+#endif
+
+#define safe_smp_processor_id() (disable_apic ? 0 : x86_apicid_to_cpu(hard_smp_processor_id()))
+
+#endif /* !ASSEMBLY */
+
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+
+#endif
+
+#ifndef ASSEMBLY
+/*
+ * Some lowlevel functions might want to know about
+ * the real APIC ID <-> CPU # mapping.
+ */
+extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
+extern u8 x86_cpu_to_log_apicid[NR_CPUS];
+extern u8 bios_cpu_apicid[];
+#ifdef CONFIG_X86_LOCAL_APIC
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+ return cpus_addr(cpumask)[0];
+}
+
+static inline int x86_apicid_to_cpu(u8 apicid)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; ++i)
+ if (x86_cpu_to_apicid[i] == apicid)
+ return i;
+
+ /* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
+ * or called too early. Either way, we must be CPU 0. */
+ if (x86_cpu_to_apicid[0] == BAD_APICID)
+ return 0;
+
+ return -1;
+}
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+ if (mps_cpu < NR_CPUS)
+ return (int)bios_cpu_apicid[mps_cpu];
+ else
+ return BAD_APICID;
+}
+#endif
+
+#endif /* !ASSEMBLY */
+
+#ifndef CONFIG_SMP
+#define stack_smp_processor_id() 0
+#define safe_smp_processor_id() 0
+#define cpu_logical_map(x) (x)
+#else
+#include <asm/thread_info.h>
+#define stack_smp_processor_id() \
+({ \
+ struct thread_info *ti; \
+ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+ ti->cpu; \
+})
+#endif
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_LOCAL_APIC
+static __inline int logical_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+#endif
+#endif
+
+#endif
+
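Editor's note: x86_apicid_to_cpu() above is a plain linear scan over x86_cpu_to_apicid[], with a special case so that a query before the table is populated resolves to CPU 0. A minimal user-space sketch of the same lookup logic (the table contents and the BAD_APICID value are made up for illustration; they are not taken from the patch):

#include <stdio.h>

#define NR_CPUS    8
#define BAD_APICID 0xFFu                 /* assumed sentinel value */

/* Hypothetical cpu -> apicid table: four online CPUs, rest unset. */
static unsigned char cpu_to_apicid[NR_CPUS] = {
    0, 2, 4, 6, BAD_APICID, BAD_APICID, BAD_APICID, BAD_APICID
};

static int apicid_to_cpu(unsigned char apicid)
{
    int i;

    for (i = 0; i < NR_CPUS; i++)        /* linear scan, as above */
        if (cpu_to_apicid[i] == apicid)
            return i;
    return -1;                           /* not found */
}

int main(void)
{
    printf("apicid 4 -> cpu %d\n", apicid_to_cpu(4));   /* cpu 2  */
    printf("apicid 5 -> cpu %d\n", apicid_to_cpu(5));   /* -1     */
    return 0;
}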
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/synch_bitops.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/synch_bitops.h
new file mode 100644
index 0000000000..dea3693a67
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/synch_bitops.h
@@ -0,0 +1,85 @@
+#ifndef __XEN_SYNCH_BITOPS_H__
+#define __XEN_SYNCH_BITOPS_H__
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ * Heavily modified to provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ *
+ * Copied from asm-xen/asm-i386
+ */
+
+#include <linux/config.h>
+
+#define ADDR (*(volatile long *) addr)
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btsl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btrl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_change_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btcl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btsl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btrl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__ (
+ "lock btcl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+ return ((1UL << (nr & 31)) &
+ (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "btl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+ return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
+
+#endif /* __XEN_SYNCH_BITOPS_H__ */
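Editor's note: the locked bit operations above return the previous bit value by materialising the carry flag left by bts/btr/btc with sbbl (0 if the bit was clear, -1 if set; any non-zero reads as "was set"). The same semantics can be expressed with compiler builtins; a sketch for illustration only (__atomic_* is a GCC/Clang extension, not part of this patch):

#include <stdio.h>

/* Equivalent of synch_test_and_set_bit(): atomically set bit nr and
 * report whether it was already set. 64-bit word indexing assumed. */
static int test_and_set_bit(int nr, volatile unsigned long *addr)
{
    unsigned long mask = 1UL << (nr & 63);
    unsigned long old  = __atomic_fetch_or(addr + (nr >> 6), mask,
                                           __ATOMIC_SEQ_CST);
    return (old & mask) != 0;
}

int main(void)
{
    volatile unsigned long word = 0;
    printf("%d\n", test_and_set_bit(3, &word)); /* 0: bit was clear */
    printf("%d\n", test_and_set_bit(3, &word)); /* 1: bit now set   */
    return 0;
}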
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/system.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/system.h
new file mode 100644
index 0000000000..7c70eac425
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/system.h
@@ -0,0 +1,395 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/segment.h>
+#include <asm/synch_bitops.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/xen-public/arch-x86_64.h>
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SMP
+#define LOCK_PREFIX "lock ; "
+#else
+#define LOCK_PREFIX ""
+#endif
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
+#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
+
+/* frame pointer must be last for get_wchan */
+#define SAVE_CONTEXT "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"
+
+#define __EXTRA_CLOBBER \
+ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
+
+#define switch_to(prev,next,last) \
+ asm volatile(SAVE_CONTEXT \
+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
+ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
+ "call __switch_to\n\t" \
+ ".globl thread_return\n" \
+ "thread_return:\n\t" \
+ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
+ "movq %P[thread_info](%%rsi),%%r8\n\t" \
+ LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "jc ret_from_fork\n\t" \
+ RESTORE_CONTEXT \
+ : "=a" (last) \
+ : [next] "S" (next), [prev] "D" (prev), \
+ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)),\
+ [tif_fork] "i" (TIF_FORK), \
+ [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
+ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
+ : "memory", "cc" __EXTRA_CLOBBER)
+
+
+extern void load_gs_index(unsigned);
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong.
+ */
+#define loadsegment(seg,value) \
+ asm volatile("\n" \
+ "1:\t" \
+ "movl %k0,%%" #seg "\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\t" \
+ "movl %1,%%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 8\n\t" \
+ ".quad 1b,3b\n" \
+ ".previous" \
+ : :"r" (value), "r" (0))
+
+#define set_debug(value,register) \
+ __asm__("movq %0,%%db" #register \
+ : /* no output */ \
+ :"r" ((unsigned long) value))
+
+
+#ifdef __KERNEL__
+struct alt_instr {
+ __u8 *instr; /* original instruction */
+ __u8 *replacement;
+ __u8 cpuid; /* cpuid bit set for replacement */
+ __u8 instrlen; /* length of original instruction */
+ __u8 replacementlen; /* length of new instruction, <= instrlen */
+ __u8 pad[5];
+};
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows optimized instructions to be used even on generic binary
+ * kernels.
+ *
+ * The length of oldinstr must be greater than or equal to the length of
+ * newinstr; oldinstr can be padded with nops as needed.
+ *
+ * For non-barrier-like inlines please define new variants
+ * without the volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661b\n" /* label */ \
+ " .quad 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature) : "memory")
+
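Editor's note: a representative use of alternative(): the i386 kernel of the same era defined its memory barrier with it, patching in mfence on CPUs that advertise SSE2 while older CPUs keep a locked add (itself a full barrier on all x86 CPUs). Shown purely as an illustration of the macro, not as part of this patch:

    #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)

Here oldinstr is longer than newinstr, satisfying the length rule above; when the replacement is shorter, the patcher fills the remaining bytes with nops.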
+/*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * It is best to use constraints that are fixed size (like (%1) ... "r").
+ * If you use variable-sized constraints like "m" or "g" in the
+ * replacement, make sure to pad to the worst-case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661b\n" /* label */ \
+ " .quad 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature), ##input)
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() (HYPERVISOR_fpu_taskswitch(0))
+static inline unsigned long read_cr0(void)
+{
+ BUG();
+}
+
+static inline void write_cr0(unsigned long val)
+{
+ BUG();
+}
+
+static inline unsigned long read_cr3(void)
+{
+ BUG();
+}
+
+static inline unsigned long read_cr4(void)
+{
+ BUG();
+}
+
+static inline void write_cr4(unsigned long val)
+{
+ BUG();
+}
+#define stts() (HYPERVISOR_fpu_taskswitch(1))
+
+#define wbinvd() \
+ __asm__ __volatile__ ("wbinvd": : :"memory");
+
+#endif /* __KERNEL__ */
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr),1))
+
+#define __xg(x) ((volatile long *)(x))
+
+extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+{
+ *ptr = val;
+}
+
+#define _set_64bit set_64bit
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has a side effect, so the volatile attribute is necessary;
+ * strictly speaking the asm is still under-specified, since *ptr is an
+ * output argument as well. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %k0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 8:
+ __asm__ __volatile__("xchgq %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
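Editor's note: cmpxchg() is typically used in a read-modify-write retry loop: load the old value, compute the new one, and retry if another CPU changed the location in between. A user-space sketch of that pattern, with GCC's __sync_val_compare_and_swap standing in for the macro above (an illustration, not part of the patch):

#include <stdio.h>

/* Atomically add 'delta' using a compare-and-swap retry loop. */
static unsigned long atomic_add_cas(volatile unsigned long *p,
                                    unsigned long delta)
{
    unsigned long old, new;
    do {
        old = *p;
        new = old + delta;
        /* retry until no other writer slipped in between the load
         * and the compare-and-swap */
    } while (__sync_val_compare_and_swap(p, old, new) != old);
    return new;
}

int main(void)
{
    volatile unsigned long v = 40;
    printf("%lu\n", atomic_add_cas(&v, 2)); /* prints 42 */
    return 0;
}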
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() do {} while(0)
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do {} while(0)
+#endif
+
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+
+#ifdef CONFIG_UNORDERED_IO
+#define wmb() asm volatile("sfence" ::: "memory")
+#else
+#define wmb() asm volatile("" ::: "memory")
+#endif
+#define read_barrier_depends() do {} while(0)
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
+
+
+/*
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ the
+ * following critical operations are executed. All critical operations must
+ * complete /before/ reentrancy is permitted (e.g., __sti()). The Alpha
+ * architecture also includes these barriers, for example.
+ */
+
+#define __cli() \
+do { \
+ vcpu_info_t *_vcpu; \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu->evtchn_upcall_mask = 1; \
+ preempt_enable_no_resched(); \
+ barrier(); \
+} while (0)
+
+#define __sti() \
+do { \
+ vcpu_info_t *_vcpu; \
+ barrier(); \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu->evtchn_upcall_mask = 0; \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+ preempt_enable(); \
+} while (0)
+
+#define __save_flags(x) \
+do { \
+ vcpu_info_t *_vcpu; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ (x) = _vcpu->evtchn_upcall_mask; \
+} while (0)
+
+#define __restore_flags(x) \
+do { \
+ vcpu_info_t *_vcpu; \
+ barrier(); \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+ preempt_enable(); \
+ } else \
+ preempt_enable_no_resched(); \
+} while (0)
+
+#define safe_halt() ((void)0)
+
+#define __save_and_cli(x) \
+do { \
+ vcpu_info_t *_vcpu; \
+ preempt_disable(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ (x) = _vcpu->evtchn_upcall_mask; \
+ _vcpu->evtchn_upcall_mask = 1; \
+ preempt_enable_no_resched(); \
+ barrier(); \
+} while (0)
+
+void cpu_idle_wait(void);
+
+#define local_irq_save(x) __save_and_cli(x)
+#define local_irq_restore(x) __restore_flags(x)
+#define local_save_flags(x) __save_flags(x)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+
+#define irqs_disabled() \
+ HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+#define HAVE_EAT_KEY
+void eat_key(void);
+
+#endif
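Editor's note: under Xen the guest has no real EFLAGS.IF to toggle; __cli()/__sti() above write the per-vcpu evtchn_upcall_mask byte instead, and re-enabling must check for events that arrived while masked. A user-space model of that unmask-then-check dance (all names here are made up; this is a model of the mechanism, not the kernel code):

#include <stdio.h>

/* Model of the Xen virtual interrupt flag: a plain byte the guest
 * toggles, checked against a pending flag on unmask. */
static volatile unsigned char upcall_mask;    /* 1 == "interrupts off" */
static volatile unsigned char upcall_pending;

static void model_force_callback(void) { puts("delivering pending event"); }

#define model_irq_save(x)            \
    do { (x) = upcall_mask; upcall_mask = 1; } while (0)

#define model_irq_restore(x)                                  \
    do {                                                      \
        if ((upcall_mask = (x)) == 0 && upcall_pending)       \
            model_force_callback();                           \
    } while (0)

int main(void)
{
    unsigned char flags;
    model_irq_save(flags);
    upcall_pending = 1;       /* an event arrives while masked    */
    model_irq_restore(flags); /* unmask notices it and delivers   */
    return 0;
}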
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/timer.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/timer.h
new file mode 100644
index 0000000000..40c54f6978
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/timer.h
@@ -0,0 +1,64 @@
+#ifndef _ASMi386_TIMER_H
+#define _ASMi386_TIMER_H
+#include <linux/init.h>
+
+/**
+ * struct timer_opts - used to define a timer source
+ *
+ * @name: name of the timer.
+ * @init: Probes and initializes the timer. Takes the clock= override
+ * string as an argument. Returns 0 on success, anything else
+ * on failure. (Carried in struct init_timer_opts below.)
+ * @mark_offset: called by the timer interrupt.
+ * @get_offset: called by gettimeofday(). Returns the number of microseconds
+ * since the last timer interrupt.
+ * @monotonic_clock: returns the number of nanoseconds since the init of the
+ * timer.
+ * @delay: delays this many clock cycles.
+ */
+struct timer_opts {
+ char* name;
+ void (*mark_offset)(void);
+ unsigned long (*get_offset)(void);
+ unsigned long long (*monotonic_clock)(void);
+ void (*delay)(unsigned long);
+};
+
+struct init_timer_opts {
+ int (*init)(char *override);
+ struct timer_opts *opts;
+};
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+extern struct timer_opts* __init select_timer(void);
+extern void clock_fallback(void);
+void setup_pit_timer(void);
+
+/* Modifiers for buggy PIT handling */
+
+extern int pit_latch_buggy;
+
+extern struct timer_opts *cur_timer;
+extern int timer_ack;
+
+/* list of externed timers */
+extern struct timer_opts timer_none;
+extern struct timer_opts timer_pit;
+extern struct init_timer_opts timer_pit_init;
+extern struct init_timer_opts timer_tsc_init;
+#ifdef CONFIG_X86_CYCLONE_TIMER
+extern struct init_timer_opts timer_cyclone_init;
+#endif
+
+extern unsigned long calibrate_tsc(void);
+extern void init_cpu_khz(void);
+#ifdef CONFIG_HPET_TIMER
+extern struct init_timer_opts timer_hpet_init;
+extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
+#endif
+
+#ifdef CONFIG_X86_PM_TIMER
+extern struct init_timer_opts timer_pmtmr_init;
+#endif
+#endif
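Editor's note: struct timer_opts is a classic ops-table: each timer source fills in the function pointers and select_timer() picks one at boot. A self-contained sketch of defining and dispatching through such a table (the dummy source and the select stand-in are hypothetical):

#include <stdio.h>

struct timer_opts {                      /* mirrors the header's layout */
    char *name;
    void (*mark_offset)(void);
    unsigned long (*get_offset)(void);
    unsigned long long (*monotonic_clock)(void);
    void (*delay)(unsigned long);
};

/* Hypothetical timer source implementing the ops table. */
static void dummy_mark_offset(void) { }
static unsigned long dummy_get_offset(void) { return 0; }
static unsigned long long dummy_monotonic(void) { return 0; }
static void dummy_delay(unsigned long loops) { while (loops--) ; }

static struct timer_opts timer_dummy = {
    .name            = "dummy",
    .mark_offset     = dummy_mark_offset,
    .get_offset      = dummy_get_offset,
    .monotonic_clock = dummy_monotonic,
    .delay           = dummy_delay,
};

int main(void)
{
    struct timer_opts *cur = &timer_dummy;  /* select_timer() stand-in */
    printf("using timer: %s\n", cur->name);
    cur->delay(1000);
    return 0;
}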
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/tlbflush.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/tlbflush.h
new file mode 100644
index 0000000000..35fd9b530d
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/tlbflush.h
@@ -0,0 +1,97 @@
+#ifndef _X8664_TLBFLUSH_H
+#define _X8664_TLBFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <asm/processor.h>
+
+#define __flush_tlb() xen_tlb_flush()
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+#define __flush_tlb_global() xen_tlb_flush()
+
+
+extern unsigned long pgkern_mask;
+
+#define __flush_tlb_all() __flush_tlb_global()
+
+#define __flush_tlb_one(addr) xen_invlpg(addr)
+
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * ..but x86_64 has somewhat limited TLB flushing capabilities,
+ * so the ranged variants below fall back to flushing the whole TLB.
+ */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm == current->active_mm)
+ __flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb_one(addr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb();
+}
+
+#else
+
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+ __flush_tlb()
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+#define TLBSTATE_OK 1
+#define TLBSTATE_LAZY 2
+
+#endif
+
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* x86_64 does not keep any page table caches in TLB */
+}
+
+#endif /* _X8664_TLBFLUSH_H */
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/vga.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/vga.h
new file mode 100644
index 0000000000..14b8209600
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/vga.h
@@ -0,0 +1,20 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+/*
+ * On the PC, we can just recalculate addresses and then
+ * access the videoram directly without any black magic.
+ */
+
+#define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/xor.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/xor.h
new file mode 100644
index 0000000000..6ec68fd4ff
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/xor.h
@@ -0,0 +1,328 @@
+/*
+ * x86-64 changes / gcc fixes from Andi Kleen.
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ *
+ * This hasn't been optimized for the hammer yet, but there are likely
+ * no advantages to be gotten from x86-64 here anyway.
+ */
+
+typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
+
+/* Doesn't use gcc to save the XMM registers, because there is no easy way to
+ tell it to do a clts before the register saving. */
+#define XMMS_SAVE do { \
+ preempt_disable(); \
+ if (!(current_thread_info()->status & TS_USEDFPU)) \
+ clts(); \
+ __asm__ __volatile__ ( \
+ "movups %%xmm0,(%1) ;\n\t" \
+ "movups %%xmm1,0x10(%1) ;\n\t" \
+ "movups %%xmm2,0x20(%1) ;\n\t" \
+ "movups %%xmm3,0x30(%1) ;\n\t" \
+ : "=&r" (cr0) \
+ : "r" (xmm_save) \
+ : "memory"); \
+} while(0)
+
+#define XMMS_RESTORE do { \
+ asm volatile ( \
+ "sfence ;\n\t" \
+ "movups (%1),%%xmm0 ;\n\t" \
+ "movups 0x10(%1),%%xmm1 ;\n\t" \
+ "movups 0x20(%1),%%xmm2 ;\n\t" \
+ "movups 0x30(%1),%%xmm3 ;\n\t" \
+ : \
+ : "r" (cr0), "r" (xmm_save) \
+ : "memory"); \
+ if (!(current_thread_info()->status & TS_USEDFPU)) \
+ stts(); \
+ preempt_enable(); \
+} while(0)
+
+#define OFFS(x) "16*("#x")"
+#define PF_OFFS(x) "256+16*("#x")"
+#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
+#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
+#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
+#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
+#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
+#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
+#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
+#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
+#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
+#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
+#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
+#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
+#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
+
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned int lines = bytes >> 8;
+ unsigned long cr0;
+ xmm_store_t xmm_save[4];
+
+ XMMS_SAVE;
+
+ asm volatile (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " decl %[cnt] ; jnz 1b"
+ : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
+ : [inc] "r" (256UL)
+ : "memory");
+
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned int lines = bytes >> 8;
+ xmm_store_t xmm_save[4];
+ unsigned long cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " addq %[inc], %[p3] ;\n"
+ " decl %[cnt] ; jnz 1b"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
+ : [inc] "r" (256UL)
+ : "memory");
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned int lines = bytes >> 8;
+ xmm_store_t xmm_save[4];
+ unsigned long cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ PF3(i) \
+ PF3(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ XO3(i,0) \
+ XO3(i+1,1) \
+ XO3(i+2,2) \
+ XO3(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " addq %[inc], %[p3] ;\n"
+ " addq %[inc], %[p4] ;\n"
+ " decl %[cnt] ; jnz 1b"
+ : [cnt] "+c" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
+ : [inc] "r" (256UL)
+ : "memory" );
+
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned int lines = bytes >> 8;
+ xmm_store_t xmm_save[4];
+ unsigned long cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ PF3(i) \
+ PF3(i+2) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ PF4(i) \
+ PF4(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO3(i,0) \
+ XO3(i+1,1) \
+ XO3(i+2,2) \
+ XO3(i+3,3) \
+ XO4(i,0) \
+ XO4(i+1,1) \
+ XO4(i+2,2) \
+ XO4(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " addq %[inc], %[p3] ;\n"
+ " addq %[inc], %[p4] ;\n"
+ " addq %[inc], %[p5] ;\n"
+ " decl %[cnt] ; jnz 1b"
+ : [cnt] "+c" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
+ [p5] "+r" (p5)
+ : [inc] "r" (256UL)
+ : "memory");
+
+ XMMS_RESTORE;
+}
+
+static struct xor_block_template xor_block_sse = {
+ .name = "generic_sse",
+ .do_2 = xor_sse_2,
+ .do_3 = xor_sse_3,
+ .do_4 = xor_sse_4,
+ .do_5 = xor_sse_5,
+};
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_sse); \
+ } while (0)
+
+/* We force the use of the SSE xor block because it can write around the L2.
+ We may also be able to load into the L1 cache only, depending on how the
+ CPU deals with a load to a line that is being prefetched. */
+#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
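Editor's note: xor_sse_2() above XORs p2 into p1 in 256-byte chunks using the XMM registers and non-temporal prefetch; functionally it reduces to a word-wise XOR. A plain-C reference of those semantics, for clarity only, with none of the cache behaviour the SSE version exists for:

#include <stdio.h>

/* Reference semantics of xor_sse_2(): p1[i] ^= p2[i] over 'bytes'
 * bytes, where 'bytes' is a multiple of 256 in the real code. */
static void xor_ref_2(unsigned long bytes, unsigned long *p1,
                      const unsigned long *p2)
{
    unsigned long i, words = bytes / sizeof(unsigned long);
    for (i = 0; i < words; i++)
        p1[i] ^= p2[i];
}

int main(void)
{
    unsigned long a[4] = { 1, 2, 3, 4 };
    unsigned long b[4] = { 1, 2, 3, 4 };
    xor_ref_2(sizeof(a), a, b);
    printf("%lu %lu %lu %lu\n", a[0], a[1], a[2], a[3]); /* all zero */
    return 0;
}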
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/evtchn.h b/linux-2.6.11-xen-sparse/include/asm-xen/evtchn.h
index ed41c4a98d..1fa0530c32 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/evtchn.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/evtchn.h
@@ -36,6 +36,7 @@
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <asm-xen/xen-public/event_channel.h>
+#include <linux/smp.h>
/*
* LOW-LEVEL DEFINITIONS
@@ -56,6 +57,7 @@ static inline void mask_evtchn(int port)
static inline void unmask_evtchn(int port)
{
shared_info_t *s = HYPERVISOR_shared_info;
+ vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
synch_clear_bit(port, &s->evtchn_mask[0]);
@@ -64,10 +66,10 @@ static inline void unmask_evtchn(int port)
* a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
*/
if ( synch_test_bit (port, &s->evtchn_pending[0]) &&
- !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
+ !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel) )
{
- s->vcpu_data[0].evtchn_upcall_pending = 1;
- if ( !s->vcpu_data[0].evtchn_upcall_mask )
+ vcpu_info->evtchn_upcall_pending = 1;
+ if ( !vcpu_info->evtchn_upcall_mask )
force_evtchn_callback();
}
}
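Editor's note: the port>>5 in the hunk above reflects the two-level pending structure: event port N lives at bit (N & 31) of 32-bit word (N >> 5) in evtchn_pending[], and the matching bit of the (now per-vcpu) evtchn_pending_sel says which words need scanning. The arithmetic, worked through:

#include <stdio.h>

int main(void)
{
    unsigned int port = 70;             /* example event-channel port */
    unsigned int word = port >> 5;      /* which 32-bit word          */
    unsigned int bit  = port & 31;      /* which bit inside it        */
    printf("port %u -> evtchn_pending[%u], bit %u, sel bit %u\n",
           port, word, bit, word);      /* word 2, bit 6, sel bit 2   */
    return 0;
}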
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/gnttab.h b/linux-2.6.11-xen-sparse/include/asm-xen/gnttab.h
new file mode 100644
index 0000000000..642a74dbf9
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/gnttab.h
@@ -0,0 +1,72 @@
+/******************************************************************************
+ * gnttab.h
+ *
+ * Two sets of functionality:
+ * 1. Granting foreign access to our memory reservation.
+ * 2. Accessing others' memory reservations via grant references.
+ * (i.e., mechanisms for both sender and recipient of grant references)
+ *
+ * Copyright (c) 2004, K A Fraser
+ * Copyright (c) 2005, Christopher Clark
+ */
+
+#ifndef __ASM_GNTTAB_H__
+#define __ASM_GNTTAB_H__
+
+#include <linux/config.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/xen-public/grant_table.h>
+
+/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
+#define NR_GRANT_FRAMES 4
+#define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
+
+int
+gnttab_grant_foreign_access(
+ domid_t domid, unsigned long frame, int readonly);
+
+void
+gnttab_end_foreign_access(
+ grant_ref_t ref, int readonly);
+
+int
+gnttab_grant_foreign_transfer(
+ domid_t domid, unsigned long pfn);
+
+unsigned long
+gnttab_end_foreign_transfer(
+ grant_ref_t ref);
+
+int
+gnttab_query_foreign_access(
+ grant_ref_t ref );
+
+/*
+ * operations on reserved batches of grant references
+ */
+int
+gnttab_alloc_grant_references(
+ u16 count, grant_ref_t *pprivate_head, grant_ref_t *private_terminal );
+
+void
+gnttab_free_grant_references(
+ u16 count, grant_ref_t private_head );
+
+int
+gnttab_claim_grant_reference(
+    grant_ref_t *pprivate_head, grant_ref_t terminal );
+
+void
+gnttab_release_grant_reference(
+ grant_ref_t *private_head, grant_ref_t release );
+
+void
+gnttab_grant_foreign_access_ref(
+ grant_ref_t ref, domid_t domid, unsigned long frame, int readonly);
+
+void
+gnttab_grant_foreign_transfer_ref(
+ grant_ref_t, domid_t domid, unsigned long pfn);
+
+
+#endif /* __ASM_GNTTAB_H__ */
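Editor's note: the "reserved batches" API above hands out grant references from a free list threaded through an array: each slot stores the index of the next free entry, *pprivate_head points at the current head, and 'terminal' marks the end. A user-space model of that claim/release discipline (the layout is assumed from how gnttab.c of this era behaves, not quoted from the patch):

#include <stdio.h>

#define NR_ENTRIES 8
#define TERMINAL   0xFFFFu   /* assumed end-of-list marker */

static unsigned short next_free[NR_ENTRIES];

/* Build a free list covering [0, count): each slot points at its successor. */
static void init_refs(unsigned short *head, int count)
{
    int i;
    for (i = 0; i < count - 1; i++)
        next_free[i] = (unsigned short)(i + 1);
    next_free[count - 1] = TERMINAL;
    *head = 0;
}

static int claim_ref(unsigned short *head)
{
    unsigned short ref = *head;
    if (ref == TERMINAL)
        return -1;                 /* batch exhausted */
    *head = next_free[ref];
    return ref;
}

static void release_ref(unsigned short *head, unsigned short ref)
{
    next_free[ref] = *head;        /* push back on the free list */
    *head = ref;
}

int main(void)
{
    unsigned short head;
    init_refs(&head, NR_ENTRIES);
    int r = claim_ref(&head);
    printf("claimed ref %d\n", r);
    release_ref(&head, (unsigned short)r);
    return 0;
}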
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h b/linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h
index 5dc7a4e4ae..5a0da00816 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h
@@ -73,510 +73,68 @@ void lgdt_finish(void);
* be MACHINE addresses.
*/
-extern unsigned int mmu_update_queue_idx;
-
-void queue_l1_entry_update(pte_t *ptr, unsigned long val);
-void queue_l2_entry_update(pmd_t *ptr, pmd_t val);
-void queue_pt_switch(unsigned long ptr);
-void queue_tlb_flush(void);
-void queue_invlpg(unsigned long ptr);
-void queue_pgd_pin(unsigned long ptr);
-void queue_pgd_unpin(unsigned long ptr);
-void queue_pte_pin(unsigned long ptr);
-void queue_pte_unpin(unsigned long ptr);
-void queue_set_ldt(unsigned long ptr, unsigned long bytes);
-void queue_machphys_update(unsigned long mfn, unsigned long pfn);
-void xen_l1_entry_update(pte_t *ptr, unsigned long val);
-void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
void xen_pt_switch(unsigned long ptr);
+void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
+void xen_load_gs(unsigned int selector); /* x86_64 only */
void xen_tlb_flush(void);
void xen_invlpg(unsigned long ptr);
+
+#ifndef CONFIG_XEN_SHADOW_MODE
+void xen_l1_entry_update(pte_t *ptr, unsigned long val);
+void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
+#ifdef __x86_64__
+void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64 only */
+#endif
+void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
void xen_pgd_pin(unsigned long ptr);
void xen_pgd_unpin(unsigned long ptr);
+void xen_pud_pin(unsigned long ptr); /* x86_64 only */
+void xen_pud_unpin(unsigned long ptr); /* x86_64 only */
+void xen_pmd_pin(unsigned long ptr); /* x86_64 only */
+void xen_pmd_unpin(unsigned long ptr); /* x86_64 only */
void xen_pte_pin(unsigned long ptr);
void xen_pte_unpin(unsigned long ptr);
+#else
+#define xen_l1_entry_update(_p, _v) set_pte((_p), (pte_t){(_v)})
+#define xen_l2_entry_update(_p, _v) set_pgd((_p), (pgd_t){(_v)})
+#define xen_pgd_pin(_p) ((void)0)
+#define xen_pgd_unpin(_p) ((void)0)
+#define xen_pte_pin(_p) ((void)0)
+#define xen_pte_unpin(_p) ((void)0)
+#endif
+
void xen_set_ldt(unsigned long ptr, unsigned long bytes);
void xen_machphys_update(unsigned long mfn, unsigned long pfn);
-void _flush_page_update_queue(void);
-static inline int flush_page_update_queue(void)
-{
- unsigned int idx = mmu_update_queue_idx;
- if ( idx != 0 ) _flush_page_update_queue();
- return idx;
-}
-#define xen_flush_page_update_queue() (_flush_page_update_queue())
-#define XEN_flush_page_update_queue() (_flush_page_update_queue())
-void MULTICALL_flush_page_update_queue(void);
-
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-/* Allocate a contiguous empty region of low memory. Return virtual start. */
-unsigned long allocate_empty_lowmem_region(unsigned long pages);
+#ifdef CONFIG_SMP
+#include <linux/cpumask.h>
+void xen_tlb_flush_all(void);
+void xen_invlpg_all(unsigned long ptr);
+void xen_tlb_flush_mask(cpumask_t *mask);
+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
#endif
-/*
- * Assembler stubs for hyper-calls.
- */
-
-static inline int
-HYPERVISOR_set_trap_table(
- trap_info_t *table)
-{
- int ret;
- unsigned long ignore;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ignore)
- : "0" (__HYPERVISOR_set_trap_table), "1" (table)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_mmu_update(
- mmu_update_t *req, int count, int *success_count)
-{
- int ret;
- unsigned long ign1, ign2, ign3;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
- "3" (success_count)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_set_gdt(
- unsigned long *frame_list, int entries)
-{
- int ret;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
- : "memory" );
-
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_stack_switch(
- unsigned long ss, unsigned long esp)
-{
- int ret;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_set_callbacks(
- unsigned long event_selector, unsigned long event_address,
- unsigned long failsafe_selector, unsigned long failsafe_address)
-{
- int ret;
- unsigned long ign1, ign2, ign3, ign4;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
- : "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
- "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_fpu_taskswitch(
- void)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_yield(
- void)
-{
- int ret;
- unsigned long ign;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign)
- : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_yield)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_block(
- void)
-{
- int ret;
- unsigned long ign1;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_block)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_shutdown(
- void)
-{
- int ret;
- unsigned long ign1;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_sched_op),
- "1" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_reboot(
- void)
-{
- int ret;
- unsigned long ign1;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_sched_op),
- "1" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_suspend(
- unsigned long srec)
-{
- int ret;
- unsigned long ign1, ign2;
-
- /* NB. On suspend, control software expects a suspend record in %esi. */
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=S" (ign2)
- : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)),
- "S" (srec) : "memory");
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_crash(
- void)
-{
- int ret;
- unsigned long ign1;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_sched_op),
- "1" (SCHEDOP_shutdown | (SHUTDOWN_crash << SCHEDOP_reasonshift))
- : "memory" );
-
- return ret;
-}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+/*
+** XXX SMH: 2.4 doesn't have percpu.h (or support SMP guests) so just
+** include sufficient #defines to allow the below to build.
+*/
+#define DEFINE_PER_CPU(type, name) \
+ __typeof__(type) per_cpu__##name
-static inline long
-HYPERVISOR_set_timer_op(
- u64 timeout)
-{
- int ret;
- unsigned long timeout_hi = (unsigned long)(timeout>>32);
- unsigned long timeout_lo = (unsigned long)timeout;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_hi), "c" (timeout_lo)
- : "memory");
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_dom0_op(
- dom0_op_t *dom0_op)
-{
- int ret;
- unsigned long ign1;
-
- dom0_op->interface_version = DOM0_INTERFACE_VERSION;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
- : "memory");
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_set_debugreg(
- int reg, unsigned long value)
-{
- int ret;
- unsigned long ign1, ign2;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
- : "memory" );
-
- return ret;
-}
-
-static inline unsigned long
-HYPERVISOR_get_debugreg(
- int reg)
-{
- unsigned long ret;
- unsigned long ign;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign)
- : "0" (__HYPERVISOR_get_debugreg), "1" (reg)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_update_descriptor(
- unsigned long ma, unsigned long word1, unsigned long word2)
-{
- int ret;
- unsigned long ign1, ign2, ign3;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- : "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
- "3" (word2)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_set_fast_trap(
- int idx)
-{
- int ret;
- unsigned long ign;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign)
- : "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_dom_mem_op(
- unsigned int op, unsigned long *extent_list,
- unsigned long nr_extents, unsigned int extent_order)
-{
- int ret;
- unsigned long ign1, ign2, ign3, ign4, ign5;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
- "=D" (ign5)
- : "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
- "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_multicall(
- void *call_list, int nr_calls)
-{
- int ret;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_update_va_mapping(
- unsigned long page_nr, pte_t new_val, unsigned long flags)
-{
- int ret;
- unsigned long ign1, ign2, ign3;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- : "0" (__HYPERVISOR_update_va_mapping),
- "1" (page_nr), "2" ((new_val).pte_low), "3" (flags)
- : "memory" );
-
- if ( unlikely(ret < 0) )
- {
- printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
- page_nr, (new_val).pte_low, flags);
- BUG();
- }
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_event_channel_op(
- void *op)
-{
- int ret;
- unsigned long ignore;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ignore)
- : "0" (__HYPERVISOR_event_channel_op), "1" (op)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_xen_version(
- int cmd)
-{
- int ret;
- unsigned long ignore;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ignore)
- : "0" (__HYPERVISOR_xen_version), "1" (cmd)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_console_io(
- int cmd, int count, char *str)
-{
- int ret;
- unsigned long ign1, ign2, ign3;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- : "0" (__HYPERVISOR_console_io), "1" (cmd), "2" (count), "3" (str)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_physdev_op(
- void *physdev_op)
-{
- int ret;
- unsigned long ign;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign)
- : "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_grant_table_op(
- unsigned int cmd, void *uop, unsigned int count)
-{
- int ret;
- unsigned long ign1, ign2, ign3;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- : "0" (__HYPERVISOR_grant_table_op), "1" (cmd), "2" (count), "3" (uop)
- : "memory" );
+#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
+#define __get_cpu_var(var) per_cpu__##var
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
- return ret;
-}
-
-static inline int
-HYPERVISOR_update_va_mapping_otherdomain(
- unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
-{
- int ret;
- unsigned long ign1, ign2, ign3, ign4;
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+#endif /* linux < 2.6.0 */
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
- : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
- "1" (page_nr), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
- "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_vm_assist(
- unsigned int cmd, unsigned int type)
-{
- int ret;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
- : "memory" );
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+/* Allocate a contiguous empty region of low memory. Return virtual start. */
+unsigned long allocate_empty_lowmem_region(unsigned long pages);
+#endif
- return ret;
-}
+#include <asm/hypercall.h>
#endif /* __HYPERVISOR_H__ */
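Editor's note: under the 2.4 compatibility fallback added above, per-CPU variables degrade to a single shared global: per_cpu(var, cpu) evaluates the cpu argument for side effects and then ignores it. A user-space model using the patch's own macro definitions:

#include <stdio.h>

#define DEFINE_PER_CPU(type, name) __typeof__(type) per_cpu__##name
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var

DEFINE_PER_CPU(int, counter);

int main(void)
{
    per_cpu(counter, 0) = 5;   /* all "cpus" share one instance */
    per_cpu(counter, 3)++;
    printf("%d\n", __get_cpu_var(counter)); /* prints 6 */
    return 0;
}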
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/multicall.h b/linux-2.6.11-xen-sparse/include/asm-xen/multicall.h
deleted file mode 100644
index ca169b57b9..0000000000
--- a/linux-2.6.11-xen-sparse/include/asm-xen/multicall.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/******************************************************************************
- * multicall.h
- *
- * Copyright (c) 2003-2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __MULTICALL_H__
-#define __MULTICALL_H__
-
-#include <asm-xen/hypervisor.h>
-
-extern multicall_entry_t multicall_list[];
-extern int nr_multicall_ents;
-
-static inline void queue_multicall0(unsigned long op)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall1(unsigned long op, unsigned long arg1)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall2(
- unsigned long op, unsigned long arg1, unsigned long arg2)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall3(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall4(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- multicall_list[i].args[3] = arg4;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall5(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4, unsigned long arg5)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- multicall_list[i].args[3] = arg4;
- multicall_list[i].args[4] = arg5;
- nr_multicall_ents = i+1;
-}
-
-static inline void execute_multicall_list(void)
-{
- if ( unlikely(nr_multicall_ents == 0) ) return;
- (void)HYPERVISOR_multicall(multicall_list, nr_multicall_ents);
- nr_multicall_ents = 0;
-}
-
-#endif /* __MULTICALL_H__ */
diff --git a/linux-2.6.11-xen-sparse/mm/mmap.c b/linux-2.6.11-xen-sparse/mm/mmap.c
new file mode 100644
index 0000000000..848200e1b8
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/mm/mmap.c
@@ -0,0 +1,2108 @@
+/*
+ * mm/mmap.c
+ *
+ * Written by obz.
+ *
+ * Address space accounting code <alan@redhat.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/shm.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/syscalls.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/personality.h>
+#include <linux/security.h>
+#include <linux/hugetlb.h>
+#include <linux/profile.h>
+#include <linux/module.h>
+#include <linux/acct.h>
+#include <linux/mount.h>
+#include <linux/mempolicy.h>
+#include <linux/rmap.h>
+
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/tlb.h>
+
+/*
+ * WARNING: the debugging code uses recursive algorithms, so never enable this
+ * unless you know what you are doing.
+ */
+#undef DEBUG_MM_RB
+
+/* Description of the effects of mapping type and prot in the current
+ * implementation. This is due to the limited x86 page protection hardware.
+ * The expected behavior is in parens:
+ *
+ * map_type prot
+ * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
+ * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (yes) yes w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (copy) copy w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ */
+pgprot_t protection_map[16] = {
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+};
+
+int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
+int sysctl_overcommit_ratio = 50; /* default is 50% */
+int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+atomic_t vm_committed_space = ATOMIC_INIT(0);
+
+/*
+ * Check that a process has enough memory to allocate a new virtual
+ * mapping. 0 means there is enough memory for the allocation to
+ * succeed and -ENOMEM implies there is not.
+ *
+ * We currently support three overcommit policies, which are set via the
+ * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
+ *
+ * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
+ * Additional code 2002 Jul 20 by Robert Love.
+ *
+ * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
+ *
+ * Note this is a helper function intended to be used by LSMs which
+ * wish to use this logic.
+ */
+int __vm_enough_memory(long pages, int cap_sys_admin)
+{
+ unsigned long free, allowed;
+
+ vm_acct_memory(pages);
+
+ /*
+ * Sometimes we want to use more memory than we have
+ */
+ if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
+ return 0;
+
+ if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
+ unsigned long n;
+
+ free = get_page_cache_size();
+ free += nr_swap_pages;
+
+ /*
+ * Any slabs which are created with the
+ * SLAB_RECLAIM_ACCOUNT flag claim to have contents
+ * which are reclaimable, under pressure. The dentry
+ * cache and most inode caches should fall into this
+ */
+ free += atomic_read(&slab_reclaim_pages);
+
+ /*
+ * Leave the last 3% for root
+ */
+ if (!cap_sys_admin)
+ free -= free / 32;
+
+ if (free > pages)
+ return 0;
+
+ /*
+ * nr_free_pages() is very expensive on large systems,
+ * only call if we're about to fail.
+ */
+ n = nr_free_pages();
+ if (!cap_sys_admin)
+ n -= n / 32;
+ free += n;
+
+ if (free > pages)
+ return 0;
+ vm_unacct_memory(pages);
+ return -ENOMEM;
+ }
+
+ allowed = (totalram_pages - hugetlb_total_pages())
+ * sysctl_overcommit_ratio / 100;
+ /*
+ * Leave the last 3% for root
+ */
+ if (!cap_sys_admin)
+ allowed -= allowed / 32;
+ allowed += total_swap_pages;
+
+ /* Don't let a single process grow too big:
+ leave 3% of the size of this process for other processes */
+ allowed -= current->mm->total_vm / 32;
+
+ if (atomic_read(&vm_committed_space) < allowed)
+ return 0;
+
+ vm_unacct_memory(pages);
+
+ return -ENOMEM;
+}
+
+EXPORT_SYMBOL(sysctl_overcommit_memory);
+EXPORT_SYMBOL(sysctl_overcommit_ratio);
+EXPORT_SYMBOL(sysctl_max_map_count);
+EXPORT_SYMBOL(vm_committed_space);
+EXPORT_SYMBOL(__vm_enough_memory);
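Editor's note: for the strict policy at the end of __vm_enough_memory() the commit limit is pure arithmetic rather than heuristic: (totalram - hugetlb) * overcommit_ratio / 100, minus ~3% for non-root, plus swap, minus ~3% of the current process. A worked example with made-up figures (all values in pages):

#include <stdio.h>

int main(void)
{
    /* Hypothetical machine, chosen only to make the arithmetic visible. */
    unsigned long totalram = 262144;   /* 1 GiB of 4 KiB pages      */
    unsigned long hugetlb  = 0;
    unsigned long swap     = 131072;   /* 512 MiB                   */
    unsigned long ratio    = 50;       /* sysctl_overcommit_ratio   */
    unsigned long total_vm = 8192;     /* this process's mappings   */

    unsigned long allowed = (totalram - hugetlb) * ratio / 100;
    allowed -= allowed / 32;           /* leave ~3% for root        */
    allowed += swap;
    allowed -= total_vm / 32;          /* leave room for others     */

    printf("commit limit: %lu pages\n", allowed);
    return 0;
}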
+
+/*
+ * Requires inode->i_mapping->i_mmap_lock
+ */
+static void __remove_shared_vm_struct(struct vm_area_struct *vma,
+ struct file *file, struct address_space *mapping)
+{
+ if (vma->vm_flags & VM_DENYWRITE)
+ atomic_inc(&file->f_dentry->d_inode->i_writecount);
+ if (vma->vm_flags & VM_SHARED)
+ mapping->i_mmap_writable--;
+
+ flush_dcache_mmap_lock(mapping);
+ if (unlikely(vma->vm_flags & VM_NONLINEAR))
+ list_del_init(&vma->shared.vm_set.list);
+ else
+ vma_prio_tree_remove(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+}
+
+/*
+ * Remove one vm structure and free it.
+ */
+static void remove_vm_struct(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+
+ might_sleep();
+ if (file) {
+ struct address_space *mapping = file->f_mapping;
+ spin_lock(&mapping->i_mmap_lock);
+ __remove_shared_vm_struct(vma, file, mapping);
+ spin_unlock(&mapping->i_mmap_lock);
+ }
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ if (file)
+ fput(file);
+ anon_vma_unlink(vma);
+ mpol_free(vma_policy(vma));
+ kmem_cache_free(vm_area_cachep, vma);
+}
+
+/*
+ * sys_brk() for the most part doesn't need the global kernel
+ * lock, except when an application is doing something nasty
+ * like trying to un-brk an area that has already been mapped
+ * to a regular file. In this case, the unmapping will need
+ * to invoke file system routines that need the global lock.
+ */
+asmlinkage unsigned long sys_brk(unsigned long brk)
+{
+ unsigned long rlim, retval;
+ unsigned long newbrk, oldbrk;
+ struct mm_struct *mm = current->mm;
+
+ down_write(&mm->mmap_sem);
+
+ if (brk < mm->end_code)
+ goto out;
+ newbrk = PAGE_ALIGN(brk);
+ oldbrk = PAGE_ALIGN(mm->brk);
+ if (oldbrk == newbrk)
+ goto set_brk;
+
+ /* Always allow shrinking brk. */
+ if (brk <= mm->brk) {
+ if (!do_munmap(mm, newbrk, oldbrk-newbrk))
+ goto set_brk;
+ goto out;
+ }
+
+ /* Check against rlimit.. */
+ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+ goto out;
+
+ /* Check against existing mmap mappings. */
+ if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ goto out;
+
+ /* Ok, looks good - let it rip. */
+ if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
+ goto out;
+set_brk:
+ mm->brk = brk;
+out:
+ retval = mm->brk;
+ up_write(&mm->mmap_sem);
+ return retval;
+}
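Editor's note: sys_brk() page-aligns both the old and the new break and only does real work when the aligned values differ; shrinking always succeeds, while growing is checked against RLIMIT_DATA and existing mappings. From user space the same entry point is reached through brk(2)/sbrk(3); a small runnable demonstration:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    void *before = sbrk(0);        /* read the current program break */
    if (sbrk(4096) == (void *)-1)  /* ask the kernel to grow it      */
        return 1;
    void *after = sbrk(0);
    printf("break moved from %p to %p\n", before, after);
    return 0;
}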
+
+#ifdef DEBUG_MM_RB
+static int browse_rb(struct rb_root *root)
+{
+ int i = 0, j;
+ struct rb_node *nd, *pn = NULL;
+ unsigned long prev = 0, pend = 0;
+
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ struct vm_area_struct *vma;
+ vma = rb_entry(nd, struct vm_area_struct, vm_rb);
+ if (vma->vm_start < prev)
+ printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
+ if (vma->vm_start < pend)
+ printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
+ if (vma->vm_start > vma->vm_end)
+ printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
+ i++;
+ pn = nd;
+ }
+ j = 0;
+ for (nd = pn; nd; nd = rb_prev(nd)) {
+ j++;
+ }
+ if (i != j)
+ printk("backwards %d, forwards %d\n", j, i), i = 0;
+ return i;
+}
+
+void validate_mm(struct mm_struct *mm)
+{
+ int bug = 0;
+ int i = 0;
+ struct vm_area_struct *tmp = mm->mmap;
+ while (tmp) {
+ tmp = tmp->vm_next;
+ i++;
+ }
+ if (i != mm->map_count)
+ printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
+ i = browse_rb(&mm->mm_rb);
+ if (i != mm->map_count)
+ printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
+ if (bug)
+ BUG();
+}
+#else
+#define validate_mm(mm) do { } while (0)
+#endif
+
+static struct vm_area_struct *
+find_vma_prepare(struct mm_struct *mm, unsigned long addr,
+ struct vm_area_struct **pprev, struct rb_node ***rb_link,
+ struct rb_node ** rb_parent)
+{
+ struct vm_area_struct * vma;
+ struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
+
+ __rb_link = &mm->mm_rb.rb_node;
+ rb_prev = __rb_parent = NULL;
+ vma = NULL;
+
+ while (*__rb_link) {
+ struct vm_area_struct *vma_tmp;
+
+ __rb_parent = *__rb_link;
+ vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
+
+ if (vma_tmp->vm_end > addr) {
+ vma = vma_tmp;
+ if (vma_tmp->vm_start <= addr)
+ return vma;
+ __rb_link = &__rb_parent->rb_left;
+ } else {
+ rb_prev = __rb_parent;
+ __rb_link = &__rb_parent->rb_right;
+ }
+ }
+
+ *pprev = NULL;
+ if (rb_prev)
+ *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
+ *rb_link = __rb_link;
+ *rb_parent = __rb_parent;
+ return vma;
+}
+
+static inline void
+__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct vm_area_struct *prev, struct rb_node *rb_parent)
+{
+ if (prev) {
+ vma->vm_next = prev->vm_next;
+ prev->vm_next = vma;
+ } else {
+ mm->mmap = vma;
+ if (rb_parent)
+ vma->vm_next = rb_entry(rb_parent,
+ struct vm_area_struct, vm_rb);
+ else
+ vma->vm_next = NULL;
+ }
+}
+
+void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct rb_node **rb_link, struct rb_node *rb_parent)
+{
+ rb_link_node(&vma->vm_rb, rb_parent, rb_link);
+ rb_insert_color(&vma->vm_rb, &mm->mm_rb);
+}
+
+static inline void __vma_link_file(struct vm_area_struct *vma)
+{
+ struct file * file;
+
+ file = vma->vm_file;
+ if (file) {
+ struct address_space *mapping = file->f_mapping;
+
+ if (vma->vm_flags & VM_DENYWRITE)
+ atomic_dec(&file->f_dentry->d_inode->i_writecount);
+ if (vma->vm_flags & VM_SHARED)
+ mapping->i_mmap_writable++;
+
+ flush_dcache_mmap_lock(mapping);
+ if (unlikely(vma->vm_flags & VM_NONLINEAR))
+ vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
+ else
+ vma_prio_tree_insert(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+ }
+}
+
+static void
+__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct vm_area_struct *prev, struct rb_node **rb_link,
+ struct rb_node *rb_parent)
+{
+ __vma_link_list(mm, vma, prev, rb_parent);
+ __vma_link_rb(mm, vma, rb_link, rb_parent);
+ __anon_vma_link(vma);
+}
+
+static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct vm_area_struct *prev, struct rb_node **rb_link,
+ struct rb_node *rb_parent)
+{
+ struct address_space *mapping = NULL;
+
+ if (vma->vm_file)
+ mapping = vma->vm_file->f_mapping;
+
+ if (mapping) {
+ spin_lock(&mapping->i_mmap_lock);
+ vma->vm_truncate_count = mapping->truncate_count;
+ }
+ anon_vma_lock(vma);
+
+ __vma_link(mm, vma, prev, rb_link, rb_parent);
+ __vma_link_file(vma);
+
+ anon_vma_unlock(vma);
+ if (mapping)
+ spin_unlock(&mapping->i_mmap_lock);
+
+ mm->map_count++;
+ validate_mm(mm);
+}
+
+/*
+ * Helper for vma_adjust in the split_vma insert case:
+ * insert vm structure into list and rbtree and anon_vma,
+ * but it has already been inserted into prio_tree earlier.
+ */
+static void
+__insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+{
+ struct vm_area_struct * __vma, * prev;
+ struct rb_node ** rb_link, * rb_parent;
+
+ __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
+ if (__vma && __vma->vm_start < vma->vm_end)
+ BUG();
+ __vma_link(mm, vma, prev, rb_link, rb_parent);
+ mm->map_count++;
+}
+
+static inline void
+__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct vm_area_struct *prev)
+{
+ prev->vm_next = vma->vm_next;
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ if (mm->mmap_cache == vma)
+ mm->mmap_cache = prev;
+}
+
+/*
+ * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
+ * is already present in an i_mmap tree without adjusting the tree.
+ * The following helper function should be used when such adjustments
+ * are necessary. The "insert" vma (if any) is to be inserted
+ * before we drop the necessary locks.
+ */
+void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *next = vma->vm_next;
+ struct vm_area_struct *importer = NULL;
+ struct address_space *mapping = NULL;
+ struct prio_tree_root *root = NULL;
+ struct file *file = vma->vm_file;
+ struct anon_vma *anon_vma = NULL;
+ long adjust_next = 0;
+ int remove_next = 0;
+
+ if (next && !insert) {
+ if (end >= next->vm_end) {
+ /*
+ * vma expands, overlapping all the next, and
+ * perhaps the one after too (mprotect case 6).
+ */
+again: remove_next = 1 + (end > next->vm_end);
+ end = next->vm_end;
+ anon_vma = next->anon_vma;
+ importer = vma;
+ } else if (end > next->vm_start) {
+ /*
+ * vma expands, overlapping part of the next:
+ * mprotect case 5 shifting the boundary up.
+ */
+ adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
+ anon_vma = next->anon_vma;
+ importer = vma;
+ } else if (end < vma->vm_end) {
+ /*
+ * vma shrinks, and !insert tells it's not
+ * split_vma inserting another: so it must be
+ * mprotect case 4 shifting the boundary down.
+ */
+ adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
+ anon_vma = next->anon_vma;
+ importer = next;
+ }
+ }
+
+ if (file) {
+ mapping = file->f_mapping;
+ if (!(vma->vm_flags & VM_NONLINEAR))
+ root = &mapping->i_mmap;
+ spin_lock(&mapping->i_mmap_lock);
+ if (importer &&
+ vma->vm_truncate_count != next->vm_truncate_count) {
+ /*
+ * unmap_mapping_range might be in progress:
+ * ensure that the expanding vma is rescanned.
+ */
+ importer->vm_truncate_count = 0;
+ }
+ if (insert) {
+ insert->vm_truncate_count = vma->vm_truncate_count;
+ /*
+ * Put into prio_tree now, so instantiated pages
+ * are visible to arm/parisc __flush_dcache_page
+ * throughout; but we cannot insert into address
+ * space until vma start or end is updated.
+ */
+ __vma_link_file(insert);
+ }
+ }
+
+ /*
+ * When changing only vma->vm_end, we don't really need
+ * anon_vma lock: but is that case worth optimizing out?
+ */
+ if (vma->anon_vma)
+ anon_vma = vma->anon_vma;
+ if (anon_vma) {
+ spin_lock(&anon_vma->lock);
+ /*
+ * Easily overlooked: when mprotect shifts the boundary,
+ * make sure the expanding vma has anon_vma set if the
+ * shrinking vma had, to cover any anon pages imported.
+ */
+ if (importer && !importer->anon_vma) {
+ importer->anon_vma = anon_vma;
+ __anon_vma_link(importer);
+ }
+ }
+
+ if (root) {
+ flush_dcache_mmap_lock(mapping);
+ vma_prio_tree_remove(vma, root);
+ if (adjust_next)
+ vma_prio_tree_remove(next, root);
+ }
+
+ vma->vm_start = start;
+ vma->vm_end = end;
+ vma->vm_pgoff = pgoff;
+ if (adjust_next) {
+ next->vm_start += adjust_next << PAGE_SHIFT;
+ next->vm_pgoff += adjust_next;
+ }
+
+ if (root) {
+ if (adjust_next)
+ vma_prio_tree_insert(next, root);
+ vma_prio_tree_insert(vma, root);
+ flush_dcache_mmap_unlock(mapping);
+ }
+
+ if (remove_next) {
+ /*
+ * vma_merge has merged next into vma, and needs
+ * us to remove next before dropping the locks.
+ */
+ __vma_unlink(mm, next, vma);
+ if (file)
+ __remove_shared_vm_struct(next, file, mapping);
+ if (next->anon_vma)
+ __anon_vma_merge(vma, next);
+ } else if (insert) {
+ /*
+ * split_vma has split insert from vma, and needs
+ * us to insert it before dropping the locks
+ * (it may either follow vma or precede it).
+ */
+ __insert_vm_struct(mm, insert);
+ }
+
+ if (anon_vma)
+ spin_unlock(&anon_vma->lock);
+ if (mapping)
+ spin_unlock(&mapping->i_mmap_lock);
+
+ if (remove_next) {
+ if (file)
+ fput(file);
+ mm->map_count--;
+ mpol_free(vma_policy(next));
+ kmem_cache_free(vm_area_cachep, next);
+ /*
+ * In mprotect's case 6 (see comments on vma_merge),
+ * we must remove another next too. It would clutter
+ * up the code too much to do both in one go.
+ */
+ if (remove_next == 2) {
+ next = vma->vm_next;
+ goto again;
+ }
+ }
+
+ validate_mm(mm);
+}
+
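+/*
+ * Illustrative use of vma_adjust (mirroring vma_merge below): to grow
+ * prev over an adjacent hole ending at 'end', keeping vm_pgoff since
+ * vm_start does not move:
+ *
+ * vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff, NULL);
+ */
+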
+/*
+ * If the vma has a ->close operation then the driver probably needs to release
+ * per-vma resources, so we don't attempt to merge those.
+ */
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+
+static inline int is_mergeable_vma(struct vm_area_struct *vma,
+ struct file *file, unsigned long vm_flags)
+{
+ if (vma->vm_flags != vm_flags)
+ return 0;
+ if (vma->vm_file != file)
+ return 0;
+ if (vma->vm_ops && vma->vm_ops->close)
+ return 0;
+ return 1;
+}
+
+static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
+ struct anon_vma *anon_vma2)
+{
+ return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
+}
+
+/*
+ * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
+ * in front of (at a lower virtual address and file offset than) the vma.
+ *
+ * We cannot merge two vmas if they have differently assigned (non-NULL)
+ * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
+ *
+ * We don't check here for the merged mmap wrapping around the end of pagecache
+ * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
+ * wrap, nor mmaps which cover the final page at index -1UL.
+ */
+static int
+can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+{
+ if (is_mergeable_vma(vma, file, vm_flags) &&
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
+ if (vma->vm_pgoff == vm_pgoff)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
+ * beyond (at a higher virtual address and file offset than) the vma.
+ *
+ * We cannot merge two vmas if they have differently assigned (non-NULL)
+ * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
+ */
+static int
+can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+{
+ if (is_mergeable_vma(vma, file, vm_flags) &&
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
+ pgoff_t vm_pglen;
+ vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ if (vma->vm_pgoff + vm_pglen == vm_pgoff)
+ return 1;
+ }
+ return 0;
+}
+
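+/*
+ * Worked example (assuming 4K pages): a vma mapping [0x8000, 0xa000)
+ * at vm_pgoff 4 spans two pages, so vm_pgoff + vm_pglen == 6; a new
+ * mapping of the same file at address 0xa000 merges after it only if
+ * its pgoff is exactly 6.
+ */
+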
+/*
+ * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
+ * whether that can be merged with its predecessor or its successor.
+ * Or both (it neatly fills a hole).
+ *
+ * In most cases - when called for mmap, brk or mremap - [addr,end) is
+ * certain not to be mapped by the time vma_merge is called; but when
+ * called for mprotect, it is certain to be already mapped (either at
+ * an offset within prev, or at the start of next), and the flags of
+ * this area are about to be changed to vm_flags - and the no-change
+ * case has already been eliminated.
+ *
+ * The following mprotect cases have to be considered, where AAAA is
+ * the area passed down from mprotect_fixup, never extending beyond one
+ * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
+ *
+ * AAAA AAAA AAAA AAAA
+ * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN PPPPNNNNXXXX
+ * cannot merge might become might become might become
+ * PPNNNNNNNNNN PPPPPPPPPPNN PPPPPPPPPPPP 6 or
+ * mmap, brk or case 4 below case 5 below PPPPPPPPXXXX 7 or
+ * mremap move: PPPPNNNNNNNN 8
+ * AAAA
+ * PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN
+ * might become case 1 below case 2 below case 3 below
+ *
+ * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
+ * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
+ */
+struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ struct vm_area_struct *prev, unsigned long addr,
+ unsigned long end, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file,
+ pgoff_t pgoff, struct mempolicy *policy)
+{
+ pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
+ struct vm_area_struct *area, *next;
+
+ /*
+ * We later require that vma->vm_flags == vm_flags,
+ * so this tests vma->vm_flags & VM_SPECIAL, too.
+ */
+ if (vm_flags & VM_SPECIAL)
+ return NULL;
+
+ if (prev)
+ next = prev->vm_next;
+ else
+ next = mm->mmap;
+ area = next;
+ if (next && next->vm_end == end) /* cases 6, 7, 8 */
+ next = next->vm_next;
+
+ /*
+ * Can it merge with the predecessor?
+ */
+ if (prev && prev->vm_end == addr &&
+ mpol_equal(vma_policy(prev), policy) &&
+ can_vma_merge_after(prev, vm_flags,
+ anon_vma, file, pgoff)) {
+ /*
+ * OK, it can. Can we now merge in the successor as well?
+ */
+ if (next && end == next->vm_start &&
+ mpol_equal(policy, vma_policy(next)) &&
+ can_vma_merge_before(next, vm_flags,
+ anon_vma, file, pgoff+pglen) &&
+ is_mergeable_anon_vma(prev->anon_vma,
+ next->anon_vma)) {
+ /* cases 1, 6 */
+ vma_adjust(prev, prev->vm_start,
+ next->vm_end, prev->vm_pgoff, NULL);
+ } else /* cases 2, 5, 7 */
+ vma_adjust(prev, prev->vm_start,
+ end, prev->vm_pgoff, NULL);
+ return prev;
+ }
+
+ /*
+ * Can this new request be merged in front of next?
+ */
+ if (next && end == next->vm_start &&
+ mpol_equal(policy, vma_policy(next)) &&
+ can_vma_merge_before(next, vm_flags,
+ anon_vma, file, pgoff+pglen)) {
+ if (prev && addr < prev->vm_end) /* case 4 */
+ vma_adjust(prev, prev->vm_start,
+ addr, prev->vm_pgoff, NULL);
+ else /* cases 3, 8 */
+ vma_adjust(area, addr, next->vm_end,
+ next->vm_pgoff - pglen, NULL);
+ return area;
+ }
+
+ return NULL;
+}
+
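+/*
+ * Typical call, as in do_mmap_pgoff below when trying to expand an
+ * old private anonymous mapping:
+ *
+ * vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, NULL,
+ * pgoff, NULL);
+ *
+ * A non-NULL return is the vma now covering the range; NULL means
+ * the caller must allocate and link a fresh vm_area_struct.
+ */
+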
+/*
+ * find_mergeable_anon_vma is used by anon_vma_prepare, to check
+ * neighbouring vmas for a suitable anon_vma, before it goes off
+ * to allocate a new anon_vma. It checks because a repetitive
+ * sequence of mprotects and faults may otherwise lead to distinct
+ * anon_vmas being allocated, preventing vma merge in subsequent
+ * mprotect.
+ */
+struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *near;
+ unsigned long vm_flags;
+
+ near = vma->vm_next;
+ if (!near)
+ goto try_prev;
+
+ /*
+ * Since only mprotect tries to remerge vmas, match flags
+ * which might be mprotected into each other later on.
+ * Neither mlock nor madvise tries to remerge at present,
+ * so leave their flags as obstructing a merge.
+ */
+ vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
+ vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
+
+ if (near->anon_vma && vma->vm_end == near->vm_start &&
+ mpol_equal(vma_policy(vma), vma_policy(near)) &&
+ can_vma_merge_before(near, vm_flags,
+ NULL, vma->vm_file, vma->vm_pgoff +
+ ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
+ return near->anon_vma;
+try_prev:
+ /*
+ * It is potentially slow to have to call find_vma_prev here.
+ * But it's only on the first write fault on the vma, not
+ * every time, and we could devise a way to avoid it later
+ * (e.g. stash info in next's anon_vma_node when assigning
+ * an anon_vma, or when trying vma_merge). Another time.
+ */
+ if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
+ BUG();
+ if (!near)
+ goto none;
+
+ vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
+ vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
+
+ if (near->anon_vma && near->vm_end == vma->vm_start &&
+ mpol_equal(vma_policy(near), vma_policy(vma)) &&
+ can_vma_merge_after(near, vm_flags,
+ NULL, vma->vm_file, vma->vm_pgoff))
+ return near->anon_vma;
+none:
+ /*
+ * There's no absolute need to look only at touching neighbours:
+ * we could search further afield for "compatible" anon_vmas.
+ * But it would probably just be a waste of time searching,
+ * or lead to too many vmas hanging off the same anon_vma.
+ * We're trying to allow mprotect remerging later on,
+ * not trying to minimize memory used for anon_vmas.
+ */
+ return NULL;
+}
+
+#ifdef CONFIG_PROC_FS
+void __vm_stat_account(struct mm_struct *mm, unsigned long flags,
+ struct file *file, long pages)
+{
+ const unsigned long stack_flags
+ = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+
+#ifdef CONFIG_HUGETLB
+ if (flags & VM_HUGETLB) {
+ if (!(flags & VM_DONTCOPY))
+ mm->shared_vm += pages;
+ return;
+ }
+#endif /* CONFIG_HUGETLB */
+
+ if (file) {
+ mm->shared_vm += pages;
+ if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
+ mm->exec_vm += pages;
+ } else if (flags & stack_flags)
+ mm->stack_vm += pages;
+ if (flags & (VM_RESERVED|VM_IO))
+ mm->reserved_vm += pages;
+}
+#endif /* CONFIG_PROC_FS */
+
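+/*
+ * Example of the classification above: a PROT_EXEC file mapping
+ * without PROT_WRITE counts toward both shared_vm and exec_vm, an
+ * anonymous VM_GROWSDOWN area counts toward stack_vm, and VM_IO or
+ * VM_RESERVED areas are added to reserved_vm as well.
+ */
+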
+/*
+ * The caller must hold down_write(current->mm->mmap_sem).
+ */
+
+unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long pgoff)
+{
+ struct mm_struct * mm = current->mm;
+ struct vm_area_struct * vma, * prev;
+ struct inode *inode;
+ unsigned int vm_flags;
+ int correct_wcount = 0;
+ int error;
+ struct rb_node ** rb_link, * rb_parent;
+ int accountable = 1;
+ unsigned long charged = 0;
+
+ if (file) {
+ if (is_file_hugepages(file))
+ accountable = 0;
+
+ if (!file->f_op || !file->f_op->mmap)
+ return -ENODEV;
+
+ if ((prot & PROT_EXEC) &&
+ (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
+ return -EPERM;
+ }
+ /*
+ * Does the application expect PROT_READ to imply PROT_EXEC?
+ *
+ * (the exception is when the underlying filesystem is noexec
+ * mounted, in which case we don't add PROT_EXEC.)
+ */
+ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+ if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
+ prot |= PROT_EXEC;
+
+ if (!len)
+ return addr;
+
+ /* Careful about overflows.. */
+ len = PAGE_ALIGN(len);
+ if (!len || len > TASK_SIZE)
+ return -EINVAL;
+
+ /* offset overflow? */
+ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
+ return -EINVAL;
+
+ /* Too many mappings? */
+ if (mm->map_count > sysctl_max_map_count)
+ return -ENOMEM;
+
+ /* Obtain the address to map to. We verify (or select) it and ensure
+ * that it represents a valid section of the address space.
+ */
+ addr = get_unmapped_area(file, addr, len, pgoff, flags);
+ if (addr & ~PAGE_MASK)
+ return addr;
+
+ /* Do simple checking here so the lower-level routines won't have
+ * to. We assume access permissions have been handled by the open
+ * of the memory object, so we don't do any here.
+ */
+ vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
+ mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
+ if (flags & MAP_LOCKED) {
+ if (!can_do_mlock())
+ return -EPERM;
+ vm_flags |= VM_LOCKED;
+ }
+ /* mlock MCL_FUTURE? */
+ if (vm_flags & VM_LOCKED) {
+ unsigned long locked, lock_limit;
+ locked = mm->locked_vm << PAGE_SHIFT;
+ lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ locked += len;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ return -EAGAIN;
+ }
+
+ inode = file ? file->f_dentry->d_inode : NULL;
+
+ if (file) {
+ switch (flags & MAP_TYPE) {
+ case MAP_SHARED:
+ if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
+ return -EACCES;
+
+ /*
+ * Make sure we don't allow writing to an append-only
+ * file..
+ */
+ if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
+ return -EACCES;
+
+ /*
+ * Make sure there are no mandatory locks on the file.
+ */
+ if (locks_verify_locked(inode))
+ return -EAGAIN;
+
+ vm_flags |= VM_SHARED | VM_MAYSHARE;
+ if (!(file->f_mode & FMODE_WRITE))
+ vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
+
+ /* fall through */
+ case MAP_PRIVATE:
+ if (!(file->f_mode & FMODE_READ))
+ return -EACCES;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (flags & MAP_TYPE) {
+ case MAP_SHARED:
+ vm_flags |= VM_SHARED | VM_MAYSHARE;
+ break;
+ case MAP_PRIVATE:
+ /*
+ * Set pgoff according to addr for anon_vma.
+ */
+ pgoff = addr >> PAGE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ error = security_file_mmap(file, prot, flags);
+ if (error)
+ return error;
+
+ /* Clear old maps */
+ error = -ENOMEM;
+munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+ goto munmap_back;
+ }
+
+ /* Check against address space limit. */
+ if ((mm->total_vm << PAGE_SHIFT) + len
+ > current->signal->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+
+ if (accountable && (!(flags & MAP_NORESERVE) ||
+ sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
+ if (vm_flags & VM_SHARED) {
+ /* Check memory availability in shmem_file_setup? */
+ vm_flags |= VM_ACCOUNT;
+ } else if (vm_flags & VM_WRITE) {
+ /*
+ * Private writable mapping: check memory availability
+ */
+ charged = len >> PAGE_SHIFT;
+ if (security_vm_enough_memory(charged))
+ return -ENOMEM;
+ vm_flags |= VM_ACCOUNT;
+ }
+ }
+
+ /*
+ * Can we just expand an old private anonymous mapping?
+ * The VM_SHARED test is necessary because shmem_zero_setup
+ * will create the file object for a shared anonymous map below.
+ */
+ if (!file && !(vm_flags & VM_SHARED) &&
+ vma_merge(mm, prev, addr, addr + len, vm_flags,
+ NULL, NULL, pgoff, NULL))
+ goto out;
+
+ /*
+ * Determine the object being mapped and call the appropriate
+ * specific mapper. The address has already been validated, but
+ * not unmapped; any old mappings in the range were removed above.
+ */
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!vma) {
+ error = -ENOMEM;
+ goto unacct_error;
+ }
+ memset(vma, 0, sizeof(*vma));
+
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+ vma->vm_flags = vm_flags;
+ vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+ vma->vm_pgoff = pgoff;
+
+ if (file) {
+ error = -EINVAL;
+ if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+ goto free_vma;
+ if (vm_flags & VM_DENYWRITE) {
+ error = deny_write_access(file);
+ if (error)
+ goto free_vma;
+ correct_wcount = 1;
+ }
+ vma->vm_file = file;
+ get_file(file);
+ error = file->f_op->mmap(file, vma);
+ if (error)
+ goto unmap_and_free_vma;
+ } else if (vm_flags & VM_SHARED) {
+ error = shmem_zero_setup(vma);
+ if (error)
+ goto free_vma;
+ }
+
+ /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
+ * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
+ * that memory reservation must be checked; but that reservation
+ * belongs to shared memory object, not to vma: so now clear it.
+ */
+ if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
+ vma->vm_flags &= ~VM_ACCOUNT;
+
+ /* Can addr have changed??
+ *
+ * Answer: Yes, several device drivers can do it in their
+ * f_op->mmap method. -DaveM
+ */
+ addr = vma->vm_start;
+ pgoff = vma->vm_pgoff;
+ vm_flags = vma->vm_flags;
+
+ if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
+ vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
+ file = vma->vm_file;
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ if (correct_wcount)
+ atomic_inc(&inode->i_writecount);
+ } else {
+ if (file) {
+ if (correct_wcount)
+ atomic_inc(&inode->i_writecount);
+ fput(file);
+ }
+ mpol_free(vma_policy(vma));
+ kmem_cache_free(vm_area_cachep, vma);
+ }
+out:
+ mm->total_vm += len >> PAGE_SHIFT;
+ __vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
+ if (vm_flags & VM_LOCKED) {
+ mm->locked_vm += len >> PAGE_SHIFT;
+ make_pages_present(addr, addr + len);
+ }
+ if (flags & MAP_POPULATE) {
+ up_write(&mm->mmap_sem);
+ sys_remap_file_pages(addr, len, 0,
+ pgoff, flags & MAP_NONBLOCK);
+ down_write(&mm->mmap_sem);
+ }
+ acct_update_integrals();
+ update_mem_hiwater();
+ return addr;
+
+unmap_and_free_vma:
+ if (correct_wcount)
+ atomic_inc(&inode->i_writecount);
+ vma->vm_file = NULL;
+ fput(file);
+
+ /* Undo any partial mapping done by a device driver. */
+ zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
+free_vma:
+ kmem_cache_free(vm_area_cachep, vma);
+unacct_error:
+ if (charged)
+ vm_unacct_memory(charged);
+ return error;
+}
+
+EXPORT_SYMBOL(do_mmap_pgoff);
+
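+/*
+ * Illustrative caller (hypothetical helper, modelled on the syscall
+ * path): do_mmap_pgoff itself never takes mmap_sem, so the caller
+ * must hold it for writing, as noted above do_mmap_pgoff.
+ */
+static inline unsigned long example_mmap(struct file *file,
+ unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long pgoff)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long ret;
+
+ down_write(&mm->mmap_sem);
+ ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+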
+/* Get an address range which is currently unmapped.
+ * For shmat() with addr=0.
+ *
+ * Ugly calling convention alert:
+ * Return value with the low bits set means error value,
+ * i.e.
+ * if (ret & ~PAGE_MASK)
+ * error = ret;
+ *
+ * This function "knows" that -ENOMEM has the bits set.
+ */
+#ifndef HAVE_ARCH_UNMAPPED_AREA
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+ start_addr = addr = mm->free_area_cache;
+
+full_search:
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr) {
+ /*
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+ if (start_addr != TASK_UNMAPPED_BASE) {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+ if (!vma || addr + len <= vma->vm_start) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+ mm->free_area_cache = addr + len;
+ return addr;
+ }
+ addr = vma->vm_end;
+ }
+}
+#endif
+
+void arch_unmap_area(struct vm_area_struct *area)
+{
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+ if (area->vm_start >= TASK_UNMAPPED_BASE &&
+ area->vm_start < area->vm_mm->free_area_cache)
+ area->vm_mm->free_area_cache = area->vm_start;
+}
+
+/*
+ * This mmap-allocator allocates new areas top-down from below the
+ * stack's low limit (the base):
+ */
+#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+{
+ struct vm_area_struct *vma, *prev_vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long base = mm->mmap_base, addr = addr0;
+ int first_time = 1;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ /* don't allow allocations above current base */
+ if (mm->free_area_cache > base)
+ mm->free_area_cache = base;
+
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+try_again:
+ /* make sure it can fit in the remaining address space */
+ if (mm->free_area_cache < len)
+ goto fail;
+
+ /* either no address requested or can't fit in requested address hole */
+ addr = (mm->free_area_cache - len) & PAGE_MASK;
+ do {
+ /*
+ * Lookup failure means no vma is above this address,
+ * i.e. return with success:
+ */
+ if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+ return addr;
+
+ /*
+ * new region fits between prev_vma->vm_end and
+ * vma->vm_start, use it:
+ */
+ if (addr+len <= vma->vm_start &&
+ (!prev_vma || (addr >= prev_vma->vm_end)))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ else
+ /* pull free_area_cache down to the first hole */
+ if (mm->free_area_cache == vma->vm_end)
+ mm->free_area_cache = vma->vm_start;
+
+ /* try just below the current vma->vm_start */
+ addr = vma->vm_start-len;
+ } while (len <= vma->vm_start);
+
+fail:
+ /*
+ * if hint left us with no space for the requested
+ * mapping then try again:
+ */
+ if (first_time) {
+ mm->free_area_cache = base;
+ first_time = 0;
+ goto try_again;
+ }
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+ mm->free_area_cache = base;
+
+ return addr;
+}
+#endif
+
+void arch_unmap_area_topdown(struct vm_area_struct *area)
+{
+ /*
+ * Is this a new hole at the highest possible address?
+ */
+ if (area->vm_end > area->vm_mm->free_area_cache)
+ area->vm_mm->free_area_cache = area->vm_end;
+}
+
+unsigned long
+get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ if (flags & MAP_FIXED) {
+ unsigned long ret;
+
+ if (addr > TASK_SIZE - len)
+ return -ENOMEM;
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (file && is_file_hugepages(file)) {
+ /*
+ * Check if the given range is hugepage aligned, and
+ * can be made suitable for hugepages.
+ */
+ ret = prepare_hugepage_range(addr, len);
+ } else {
+ /*
+ * Ensure that a normal request is not falling in a
+ * reserved hugepage range. For some archs like IA-64,
+ * there is a separate region for hugepages.
+ */
+ ret = is_hugepage_only_range(addr, len);
+ }
+ if (ret)
+ return -EINVAL;
+ return addr;
+ }
+
+ if (file && file->f_op && file->f_op->get_unmapped_area)
+ return file->f_op->get_unmapped_area(file, addr, len,
+ pgoff, flags);
+
+ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
+EXPORT_SYMBOL(get_unmapped_area);
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
+{
+ struct vm_area_struct *vma = NULL;
+
+ if (mm) {
+ /* Check the cache first. */
+ /* (Cache hit rate is typically around 35%.) */
+ vma = mm->mmap_cache;
+ if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+ struct rb_node * rb_node;
+
+ rb_node = mm->mm_rb.rb_node;
+ vma = NULL;
+
+ while (rb_node) {
+ struct vm_area_struct * vma_tmp;
+
+ vma_tmp = rb_entry(rb_node,
+ struct vm_area_struct, vm_rb);
+
+ if (vma_tmp->vm_end > addr) {
+ vma = vma_tmp;
+ if (vma_tmp->vm_start <= addr)
+ break;
+ rb_node = rb_node->rb_left;
+ } else
+ rb_node = rb_node->rb_right;
+ }
+ if (vma)
+ mm->mmap_cache = vma;
+ }
+ }
+ return vma;
+}
+
+EXPORT_SYMBOL(find_vma);
+
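+/*
+ * Example (hypothetical helper): find_vma only guarantees
+ * addr < vm_end, so a caller testing whether addr is actually mapped
+ * must also check vm_start. Assumes mmap_sem is held.
+ */
+static inline int example_addr_is_mapped(struct mm_struct *mm,
+ unsigned long addr)
+{
+ struct vm_area_struct *vma = find_vma(mm, addr);
+
+ return vma && vma->vm_start <= addr;
+}
+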
+/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+struct vm_area_struct *
+find_vma_prev(struct mm_struct *mm, unsigned long addr,
+ struct vm_area_struct **pprev)
+{
+ struct vm_area_struct *vma = NULL, *prev = NULL;
+ struct rb_node * rb_node;
+ if (!mm)
+ goto out;
+
+ /* Guard against addr being lower than the first VMA */
+ vma = mm->mmap;
+
+ /* Go through the RB tree quickly. */
+ rb_node = mm->mm_rb.rb_node;
+
+ while (rb_node) {
+ struct vm_area_struct *vma_tmp;
+ vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+
+ if (addr < vma_tmp->vm_end) {
+ rb_node = rb_node->rb_left;
+ } else {
+ prev = vma_tmp;
+ if (!prev->vm_next || (addr < prev->vm_next->vm_end))
+ break;
+ rb_node = rb_node->rb_right;
+ }
+ }
+
+out:
+ *pprev = prev;
+ return prev ? prev->vm_next : vma;
+}
+
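+/*
+ * Illustrative use: the stack-extension code below wants the vma and
+ * its predecessor together, e.g.
+ *
+ * vma = find_vma_prev(mm, addr, &prev);
+ *
+ * where *prev is NULL when the returned vma is the first in the list.
+ */
+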
+/*
+ * Verify that the stack growth is acceptable and
+ * update accounting. This is shared with both the
+ * grow-up and grow-down cases.
+ */
+static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct rlimit *rlim = current->signal->rlim;
+
+ /* address space limit tests */
+ if (mm->total_vm + grow > rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT)
+ return -ENOMEM;
+
+ /* Stack limit test */
+ if (size > rlim[RLIMIT_STACK].rlim_cur)
+ return -ENOMEM;
+
+ /* mlock limit tests */
+ if (vma->vm_flags & VM_LOCKED) {
+ unsigned long locked;
+ unsigned long limit;
+ locked = mm->locked_vm + grow;
+ limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+ if (locked > limit && !capable(CAP_IPC_LOCK))
+ return -ENOMEM;
+ }
+
+ /*
+ * Overcommit.. This must be the final test, as it will
+ * update security statistics.
+ */
+ if (security_vm_enough_memory(grow))
+ return -ENOMEM;
+
+ /* Ok, everything looks good - let it rip */
+ mm->total_vm += grow;
+ if (vma->vm_flags & VM_LOCKED)
+ mm->locked_vm += grow;
+ __vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
+ acct_update_integrals();
+ update_mem_hiwater();
+ return 0;
+}
+
+#ifdef CONFIG_STACK_GROWSUP
+/*
+ * vma is the first one with address > vma->vm_end. Have to extend vma.
+ */
+int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ int error;
+
+ if (!(vma->vm_flags & VM_GROWSUP))
+ return -EFAULT;
+
+ /*
+ * We must make sure the anon_vma is allocated
+ * so that the anon_vma locking is not a noop.
+ */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
+ anon_vma_lock(vma);
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller
+ * is required to hold the mmap_sem in read mode. We need the
+ * anon_vma lock to serialize against concurrent expand_stacks.
+ */
+ address += 4 + PAGE_SIZE - 1;
+ address &= PAGE_MASK;
+ error = 0;
+
+ /* Somebody else might have raced and expanded it already */
+ if (address > vma->vm_end) {
+ unsigned long size, grow;
+
+ size = address - vma->vm_start;
+ grow = (address - vma->vm_end) >> PAGE_SHIFT;
+
+ error = acct_stack_growth(vma, size, grow);
+ if (!error)
+ vma->vm_end = address;
+ }
+ anon_vma_unlock(vma);
+ return error;
+}
+
+struct vm_area_struct *
+find_extend_vma(struct mm_struct *mm, unsigned long addr)
+{
+ struct vm_area_struct *vma, *prev;
+
+ addr &= PAGE_MASK;
+ vma = find_vma_prev(mm, addr, &prev);
+ if (vma && (vma->vm_start <= addr))
+ return vma;
+ if (!prev || expand_stack(prev, addr))
+ return NULL;
+ if (prev->vm_flags & VM_LOCKED) {
+ make_pages_present(addr, prev->vm_end);
+ }
+ return prev;
+}
+#else
+/*
+ * vma is the first one with address < vma->vm_start. Have to extend vma.
+ */
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+ int error;
+
+ /*
+ * We must make sure the anon_vma is allocated
+ * so that the anon_vma locking is not a noop.
+ */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
+ anon_vma_lock(vma);
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller
+ * is required to hold the mmap_sem in read mode. We need the
+ * anon_vma lock to serialize against concurrent expand_stacks.
+ */
+ address &= PAGE_MASK;
+ error = 0;
+
+ /* Somebody else might have raced and expanded it already */
+ if (address < vma->vm_start) {
+ unsigned long size, grow;
+
+ size = vma->vm_end - address;
+ grow = (vma->vm_start - address) >> PAGE_SHIFT;
+
+ error = acct_stack_growth(vma, size, grow);
+ if (!error) {
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
+ }
+ }
+ anon_vma_unlock(vma);
+ return error;
+}
+
+struct vm_area_struct *
+find_extend_vma(struct mm_struct * mm, unsigned long addr)
+{
+ struct vm_area_struct * vma;
+ unsigned long start;
+
+ addr &= PAGE_MASK;
+ vma = find_vma(mm, addr);
+ if (!vma)
+ return NULL;
+ if (vma->vm_start <= addr)
+ return vma;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ return NULL;
+ start = vma->vm_start;
+ if (expand_stack(vma, addr))
+ return NULL;
+ if (vma->vm_flags & VM_LOCKED) {
+ make_pages_present(addr, start);
+ }
+ return vma;
+}
+#endif
+
+/*
+ * Try to free as many page directory entries as we can,
+ * without having to work very hard at actually scanning
+ * the page tables themselves.
+ *
+ * Right now we try to free page tables if we have a nice
+ * PGDIR-aligned area that got freed up. We could be more
+ * granular if we want to, but this is fast and simple,
+ * and covers the bad cases.
+ *
+ * "prev", if it exists, points to a vma before the one
+ * we just freed - but there's no telling how much before.
+ */
+static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
+ unsigned long start, unsigned long end)
+{
+ unsigned long first = start & PGDIR_MASK;
+ unsigned long last = end + PGDIR_SIZE - 1;
+ struct mm_struct *mm = tlb->mm;
+
+ if (last > MM_VM_SIZE(mm) || last < end)
+ last = MM_VM_SIZE(mm);
+
+ if (!prev) {
+ prev = mm->mmap;
+ if (!prev)
+ goto no_mmaps;
+ if (prev->vm_end > start) {
+ if (last > prev->vm_start)
+ last = prev->vm_start;
+ goto no_mmaps;
+ }
+ }
+ for (;;) {
+ struct vm_area_struct *next = prev->vm_next;
+
+ if (next) {
+ if (next->vm_start < start) {
+ prev = next;
+ continue;
+ }
+ if (last > next->vm_start)
+ last = next->vm_start;
+ }
+ if (prev->vm_end > first)
+ first = prev->vm_end;
+ break;
+ }
+no_mmaps:
+ if (last < first) /* for arches with discontiguous pgd indices */
+ return;
+ if (first < FIRST_USER_PGD_NR * PGDIR_SIZE)
+ first = FIRST_USER_PGD_NR * PGDIR_SIZE;
+ /* No point trying to free anything if we're in the same pte page */
+ if ((first & PMD_MASK) < (last & PMD_MASK)) {
+ clear_page_range(tlb, first, last);
+ flush_tlb_pgtables(mm, first, last);
+ }
+}
+
+/* Normal function to fix up a mapping
+ * This function is the default for when an area has no specific
+ * function. This may be used as part of a more specific routine.
+ *
+ * By the time this function is called, the area struct has been
+ * removed from the process mapping list.
+ */
+static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
+{
+ size_t len = area->vm_end - area->vm_start;
+
+ area->vm_mm->total_vm -= len >> PAGE_SHIFT;
+ if (area->vm_flags & VM_LOCKED)
+ area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
+ vm_stat_unaccount(area);
+ area->vm_mm->unmap_area(area);
+ remove_vm_struct(area);
+}
+
+/*
+ * Update the VMA and inode share lists.
+ *
+ * Ok - we have the memory areas we should free on the 'free' list,
+ * so release them, and do the vma updates.
+ */
+static void unmap_vma_list(struct mm_struct *mm,
+ struct vm_area_struct *mpnt)
+{
+ do {
+ struct vm_area_struct *next = mpnt->vm_next;
+ unmap_vma(mm, mpnt);
+ mpnt = next;
+ } while (mpnt != NULL);
+ validate_mm(mm);
+}
+
+/*
+ * Get rid of page table information in the indicated region.
+ *
+ * Called with the page table lock held.
+ */
+static void unmap_region(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ struct vm_area_struct *prev,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mmu_gather *tlb;
+ unsigned long nr_accounted = 0;
+
+ lru_add_drain();
+ tlb = tlb_gather_mmu(mm, 0);
+ unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
+ vm_unacct_memory(nr_accounted);
+
+ if (is_hugepage_only_range(start, end - start))
+ hugetlb_free_pgtables(tlb, prev, start, end);
+ else
+ free_pgtables(tlb, prev, start, end);
+ tlb_finish_mmu(tlb, start, end);
+}
+
+/*
+ * Create a list of vma's touched by the unmap, removing them from the mm's
+ * vma list as we go..
+ */
+static void
+detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct vm_area_struct *prev, unsigned long end)
+{
+ struct vm_area_struct **insertion_point;
+ struct vm_area_struct *tail_vma = NULL;
+
+ insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+ do {
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ mm->map_count--;
+ tail_vma = vma;
+ vma = vma->vm_next;
+ } while (vma && vma->vm_start < end);
+ *insertion_point = vma;
+ tail_vma->vm_next = NULL;
+ mm->mmap_cache = NULL; /* Kill the cache. */
+}
+
+/*
+ * Split a vma into two pieces at address 'addr'; a new vma is allocated
+ * either for the first part or for the tail.
+ */
+int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+ unsigned long addr, int new_below)
+{
+ struct mempolicy *pol;
+ struct vm_area_struct *new;
+
+ if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK))
+ return -EINVAL;
+
+ if (mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+ new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ /* most fields are the same, copy all, and then fixup */
+ *new = *vma;
+
+ if (new_below)
+ new->vm_end = addr;
+ else {
+ new->vm_start = addr;
+ new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
+ }
+
+ pol = mpol_copy(vma_policy(vma));
+ if (IS_ERR(pol)) {
+ kmem_cache_free(vm_area_cachep, new);
+ return PTR_ERR(pol);
+ }
+ vma_set_policy(new, pol);
+
+ if (new->vm_file)
+ get_file(new->vm_file);
+
+ if (new->vm_ops && new->vm_ops->open)
+ new->vm_ops->open(new);
+
+ if (new_below)
+ vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
+ ((addr - new->vm_start) >> PAGE_SHIFT), new);
+ else
+ vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+
+ return 0;
+}
+
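+/*
+ * Typical use, as in do_munmap below:
+ *
+ * error = split_vma(mm, mpnt, start, 0);
+ *
+ * keeps [mpnt->vm_start, start) as the original vma and allocates
+ * the new one for the tail that is about to be unmapped.
+ */
+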
+/* Munmap is split into 2 main parts -- this part, which finds
+ * what needs doing, and the helpers above, which do the
+ * work. This now handles partial unmappings.
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+{
+ unsigned long end;
+ struct vm_area_struct *mpnt, *prev, *last;
+
+ if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
+ return -EINVAL;
+
+ if ((len = PAGE_ALIGN(len)) == 0)
+ return -EINVAL;
+
+ /* Find the first overlapping VMA */
+ mpnt = find_vma_prev(mm, start, &prev);
+ if (!mpnt)
+ return 0;
+ /* we have start < mpnt->vm_end */
+
+ /* if it doesn't overlap, we have nothing.. */
+ end = start + len;
+ if (mpnt->vm_start >= end)
+ return 0;
+
+ /*
+ * If we need to split any vma, do it now to save pain later.
+ *
+ * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
+ * unmapped vm_area_struct will remain in use: so lower split_vma
+ * places tmp vma above, and higher split_vma places tmp vma below.
+ */
+ if (start > mpnt->vm_start) {
+ int error = split_vma(mm, mpnt, start, 0);
+ if (error)
+ return error;
+ prev = mpnt;
+ }
+
+ /* Does it split the last one? */
+ last = find_vma(mm, end);
+ if (last && end > last->vm_start) {
+ int error = split_vma(mm, last, end, 1);
+ if (error)
+ return error;
+ }
+ mpnt = prev ? prev->vm_next : mm->mmap;
+
+ /*
+ * Remove the vma's, and unmap the actual pages
+ */
+ detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
+ spin_lock(&mm->page_table_lock);
+ unmap_region(mm, mpnt, prev, start, end);
+ spin_unlock(&mm->page_table_lock);
+
+ /* Fix up all other VM information */
+ unmap_vma_list(mm, mpnt);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(do_munmap);
+
+asmlinkage long sys_munmap(unsigned long addr, size_t len)
+{
+ int ret;
+ struct mm_struct *mm = current->mm;
+
+ profile_munmap(addr);
+
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, addr, len);
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
+static inline void verify_mm_writelocked(struct mm_struct *mm)
+{
+#ifdef CONFIG_DEBUG_KERNEL
+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+ WARN_ON(1);
+ up_read(&mm->mmap_sem);
+ }
+#endif
+}
+
+/*
+ * This is really a simplified "do_mmap": it only handles
+ * anonymous maps. Eventually we may be able to do some
+ * brk-specific accounting here.
+ */
+unsigned long do_brk(unsigned long addr, unsigned long len)
+{
+ struct mm_struct * mm = current->mm;
+ struct vm_area_struct * vma, * prev;
+ unsigned long flags;
+ struct rb_node ** rb_link, * rb_parent;
+ pgoff_t pgoff = addr >> PAGE_SHIFT;
+
+ len = PAGE_ALIGN(len);
+ if (!len)
+ return addr;
+
+ if ((addr + len) > TASK_SIZE || (addr + len) < addr)
+ return -EINVAL;
+
+ /*
+ * mlock MCL_FUTURE?
+ */
+ if (mm->def_flags & VM_LOCKED) {
+ unsigned long locked, lock_limit;
+ locked = mm->locked_vm << PAGE_SHIFT;
+ lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ locked += len;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ return -EAGAIN;
+ }
+
+ /*
+ * mm->mmap_sem is required to protect against another thread
+ * changing the mappings in case we sleep.
+ */
+ verify_mm_writelocked(mm);
+
+ /*
+ * Clear old maps. This also does some error checking for us
+ */
+ munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+ goto munmap_back;
+ }
+
+ /* Check against address space limits *after* clearing old maps... */
+ if ((mm->total_vm << PAGE_SHIFT) + len
+ > current->signal->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+
+ if (mm->map_count > sysctl_max_map_count)
+ return -ENOMEM;
+
+ if (security_vm_enough_memory(len >> PAGE_SHIFT))
+ return -ENOMEM;
+
+ flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
+ /* Can we just expand an old private anonymous mapping? */
+ if (vma_merge(mm, prev, addr, addr + len, flags,
+ NULL, NULL, pgoff, NULL))
+ goto out;
+
+ /*
+ * create a vma struct for an anonymous mapping
+ */
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!vma) {
+ vm_unacct_memory(len >> PAGE_SHIFT);
+ return -ENOMEM;
+ }
+ memset(vma, 0, sizeof(*vma));
+
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+ vma->vm_pgoff = pgoff;
+ vma->vm_flags = flags;
+ vma->vm_page_prot = protection_map[flags & 0x0f];
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+out:
+ mm->total_vm += len >> PAGE_SHIFT;
+ if (flags & VM_LOCKED) {
+ mm->locked_vm += len >> PAGE_SHIFT;
+ make_pages_present(addr, addr + len);
+ }
+ acct_update_integrals();
+ update_mem_hiwater();
+ return addr;
+}
+
+EXPORT_SYMBOL(do_brk);
+
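+/*
+ * Illustrative caller (hypothetical helper, modelled on sys_brk):
+ * do_brk expects a page-aligned addr, and verify_mm_writelocked
+ * above insists that mmap_sem is already held for writing.
+ */
+static inline unsigned long example_brk_extend(unsigned long oldbrk,
+ unsigned long newbrk)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long ret;
+
+ down_write(&mm->mmap_sem);
+ ret = do_brk(oldbrk, newbrk - oldbrk);
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+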
+/* Release all mmaps. */
+void exit_mmap(struct mm_struct *mm)
+{
+ struct mmu_gather *tlb;
+ struct vm_area_struct *vma;
+ unsigned long nr_accounted = 0;
+
+#ifdef arch_exit_mmap
+ arch_exit_mmap(mm);
+#endif
+
+ lru_add_drain();
+
+ spin_lock(&mm->page_table_lock);
+
+ tlb = tlb_gather_mmu(mm, 1);
+ flush_cache_mm(mm);
+ /* Use ~0UL here to ensure all VMAs in the mm are unmapped */
+ mm->map_count -= unmap_vmas(&tlb, mm, mm->mmap, 0,
+ ~0UL, &nr_accounted, NULL);
+ vm_unacct_memory(nr_accounted);
+ BUG_ON(mm->map_count); /* This is just debugging */
+ clear_page_range(tlb, FIRST_USER_PGD_NR * PGDIR_SIZE, MM_VM_SIZE(mm));
+
+ tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
+
+ vma = mm->mmap;
+ mm->mmap = mm->mmap_cache = NULL;
+ mm->mm_rb = RB_ROOT;
+ mm->rss = 0;
+ mm->total_vm = 0;
+ mm->locked_vm = 0;
+
+ spin_unlock(&mm->page_table_lock);
+
+ /*
+ * Walk the list again, actually closing and freeing it
+ * without holding any MM locks.
+ */
+ while (vma) {
+ struct vm_area_struct *next = vma->vm_next;
+ remove_vm_struct(vma);
+ vma = next;
+ }
+}
+
+/* Insert vm structure into process list sorted by address
+ * and into the inode's i_mmap tree. If vm_file is non-NULL
+ * then i_mmap_lock is taken here.
+ */
+int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+{
+ struct vm_area_struct * __vma, * prev;
+ struct rb_node ** rb_link, * rb_parent;
+
+ /*
+ * The vm_pgoff of a purely anonymous vma should be irrelevant
+ * until its first write fault, when the page's anon_vma and index
+ * are set. But now set the vm_pgoff it will almost certainly
+ * end up with (unless mremap moves it elsewhere before that
+ * first write fault), so /proc/pid/maps tells a consistent story.
+ *
+ * By setting it to reflect the virtual start address of the
+ * vma, merges and splits can happen in a seamless way, just
+ * using the existing file pgoff checks and manipulations.
+ * Similarly in do_mmap_pgoff and in do_brk.
+ */
+ if (!vma->vm_file) {
+ BUG_ON(vma->anon_vma);
+ vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
+ }
+ __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
+ if (__vma && __vma->vm_start < vma->vm_end)
+ return -ENOMEM;
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ return 0;
+}
+
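+/*
+ * Note for callers of insert_vm_struct: the vma must be fully
+ * initialized (vm_mm, vm_start, vm_end, vm_flags) beforehand, and an
+ * -ENOMEM return here actually reports an overlap with an existing
+ * mapping rather than an allocation failure.
+ */
+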
+/*
+ * Copy the vma structure to a new location in the same mm,
+ * prior to moving page table entries, to effect an mremap move.
+ */
+struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+ unsigned long addr, unsigned long len, pgoff_t pgoff)
+{
+ struct vm_area_struct *vma = *vmap;
+ unsigned long vma_start = vma->vm_start;
+ struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *new_vma, *prev;
+ struct rb_node **rb_link, *rb_parent;
+ struct mempolicy *pol;
+
+ /*
+ * If anonymous vma has not yet been faulted, update new pgoff
+ * to match new location, to increase its chance of merging.
+ */
+ if (!vma->vm_file && !vma->anon_vma)
+ pgoff = addr >> PAGE_SHIFT;
+
+ find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
+ vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+ if (new_vma) {
+ /*
+ * Source vma may have been merged into new_vma
+ */
+ if (vma_start >= new_vma->vm_start &&
+ vma_start < new_vma->vm_end)
+ *vmap = new_vma;
+ } else {
+ new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (new_vma) {
+ *new_vma = *vma;
+ pol = mpol_copy(vma_policy(vma));
+ if (IS_ERR(pol)) {
+ kmem_cache_free(vm_area_cachep, new_vma);
+ return NULL;
+ }
+ vma_set_policy(new_vma, pol);
+ new_vma->vm_start = addr;
+ new_vma->vm_end = addr + len;
+ new_vma->vm_pgoff = pgoff;
+ if (new_vma->vm_file)
+ get_file(new_vma->vm_file);
+ if (new_vma->vm_ops && new_vma->vm_ops->open)
+ new_vma->vm_ops->open(new_vma);
+ vma_link(mm, new_vma, prev, rb_link, rb_parent);
+ }
+ }
+ return new_vma;
+}