-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  config/ia64.mk | 6
-rw-r--r--  xen/arch/ia64/Makefile | 71
-rw-r--r--  xen/arch/ia64/Rules.mk | 93
-rw-r--r--  xen/arch/ia64/asm-offsets.c | 273
-rwxr-xr-x  xen/arch/ia64/asm-xsi-offsets.c | 76
-rw-r--r--  xen/arch/ia64/linux-xen/Makefile | 24
-rw-r--r--  xen/arch/ia64/linux-xen/README.origin | 47
-rw-r--r--  xen/arch/ia64/linux-xen/acpi.c | 1098
-rw-r--r--  xen/arch/ia64/linux-xen/acpi_numa.c | 276
-rw-r--r--  xen/arch/ia64/linux-xen/cmdline.c | 131
-rw-r--r--  xen/arch/ia64/linux-xen/efi.c | 1334
-rw-r--r--  xen/arch/ia64/linux-xen/entry.S | 1851
-rw-r--r--  xen/arch/ia64/linux-xen/entry.h | 85
-rw-r--r--  xen/arch/ia64/linux-xen/head.S | 1298
-rw-r--r--  xen/arch/ia64/linux-xen/hpsim_ssc.h | 55
-rw-r--r--  xen/arch/ia64/linux-xen/iosapic.c | 1288
-rw-r--r--  xen/arch/ia64/linux-xen/irq_ia64.c | 350
-rw-r--r--  xen/arch/ia64/linux-xen/mca.c | 1963
-rw-r--r--  xen/arch/ia64/linux-xen/mca_asm.S | 1250
-rw-r--r--  xen/arch/ia64/linux-xen/minstate.h | 306
-rw-r--r--  xen/arch/ia64/linux-xen/mm_contig.c | 357
-rw-r--r--  xen/arch/ia64/linux-xen/numa.c | 67
-rw-r--r--  xen/arch/ia64/linux-xen/perfmon.c | 7871
-rw-r--r--  xen/arch/ia64/linux-xen/perfmon_default_smpl.c | 311
-rw-r--r--  xen/arch/ia64/linux-xen/perfmon_generic.h | 45
-rw-r--r--  xen/arch/ia64/linux-xen/perfmon_itanium.h | 115
-rw-r--r--  xen/arch/ia64/linux-xen/perfmon_mckinley.h | 187
-rw-r--r--  xen/arch/ia64/linux-xen/perfmon_montecito.h | 269
-rw-r--r--  xen/arch/ia64/linux-xen/process-linux-xen.c | 891
-rw-r--r--  xen/arch/ia64/linux-xen/sal.c | 386
-rw-r--r--  xen/arch/ia64/linux-xen/setup.c | 1056
-rw-r--r--  xen/arch/ia64/linux-xen/smp.c | 495
-rw-r--r--  xen/arch/ia64/linux-xen/smpboot.c | 977
-rw-r--r--  xen/arch/ia64/linux-xen/sn/Makefile | 1
-rw-r--r--  xen/arch/ia64/linux-xen/sn/kernel/Makefile | 5
-rw-r--r--  xen/arch/ia64/linux-xen/sn/kernel/README.origin | 12
-rw-r--r--  xen/arch/ia64/linux-xen/sn/kernel/io_init.c | 793
-rw-r--r--  xen/arch/ia64/linux-xen/sn/kernel/iomv.c | 82
-rw-r--r--  xen/arch/ia64/linux-xen/sn/kernel/irq.c | 576
-rw-r--r--  xen/arch/ia64/linux-xen/sn/kernel/setup.c | 803
-rw-r--r--  xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c | 621
-rw-r--r--  xen/arch/ia64/linux-xen/time.c | 273
-rw-r--r--  xen/arch/ia64/linux-xen/tlb.c | 206
-rw-r--r--  xen/arch/ia64/linux-xen/unaligned.c | 1985
-rw-r--r--  xen/arch/ia64/linux-xen/unwind.c | 2393
-rw-r--r--  xen/arch/ia64/linux-xen/unwind_decoder.c | 459
-rw-r--r--  xen/arch/ia64/linux-xen/unwind_i.h | 164
-rw-r--r--  xen/arch/ia64/linux/Makefile | 57
-rw-r--r--  xen/arch/ia64/linux/README.origin | 34
-rw-r--r--  xen/arch/ia64/linux/bitop.c | 88
-rw-r--r--  xen/arch/ia64/linux/carta_random.S | 54
-rw-r--r--  xen/arch/ia64/linux/clear_page.S | 77
-rw-r--r--  xen/arch/ia64/linux/clear_user.S | 209
-rw-r--r--  xen/arch/ia64/linux/copy_page_mck.S | 185
-rw-r--r--  xen/arch/ia64/linux/dig/Makefile | 1
-rw-r--r--  xen/arch/ia64/linux/dig/README.origin | 7
-rw-r--r--  xen/arch/ia64/linux/dig/machvec.c | 3
-rw-r--r--  xen/arch/ia64/linux/efi_stub.S | 86
-rw-r--r--  xen/arch/ia64/linux/extable.c | 90
-rw-r--r--  xen/arch/ia64/linux/flush.S | 61
-rw-r--r--  xen/arch/ia64/linux/hp/Makefile | 1
-rw-r--r--  xen/arch/ia64/linux/hp/zx1/Makefile | 1
-rw-r--r--  xen/arch/ia64/linux/hp/zx1/README.origin | 7
-rw-r--r--  xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c | 3
-rw-r--r--  xen/arch/ia64/linux/hpsim.S | 10
-rw-r--r--  xen/arch/ia64/linux/idiv32.S | 83
-rw-r--r--  xen/arch/ia64/linux/idiv64.S | 80
-rw-r--r--  xen/arch/ia64/linux/io.c | 164
-rw-r--r--  xen/arch/ia64/linux/linuxextable.c | 71
-rw-r--r--  xen/arch/ia64/linux/machvec.c | 70
-rw-r--r--  xen/arch/ia64/linux/memcpy_mck.S | 661
-rw-r--r--  xen/arch/ia64/linux/memset.S | 362
-rw-r--r--  xen/arch/ia64/linux/numa.c | 49
-rw-r--r--  xen/arch/ia64/linux/pal.S | 298
-rw-r--r--  xen/arch/ia64/linux/pcdp.h | 111
-rw-r--r--  xen/arch/ia64/linux/sn/Makefile | 2
-rw-r--r--  xen/arch/ia64/linux/sn/kernel/Makefile | 3
-rw-r--r--  xen/arch/ia64/linux/sn/kernel/README.origin | 9
-rw-r--r--  xen/arch/ia64/linux/sn/kernel/machvec.c | 11
-rw-r--r--  xen/arch/ia64/linux/sn/kernel/pio_phys.S | 71
-rw-r--r--  xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S | 92
-rw-r--r--  xen/arch/ia64/linux/sn/pci/Makefile | 1
-rw-r--r--  xen/arch/ia64/linux/sn/pci/pcibr/Makefile | 1
-rw-r--r--  xen/arch/ia64/linux/sn/pci/pcibr/README.origin | 7
-rw-r--r--  xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c | 285
-rw-r--r--  xen/arch/ia64/linux/strlen.S | 192
-rw-r--r--  xen/arch/ia64/tools/README.RunVT | 46
-rw-r--r--  xen/arch/ia64/tools/README.xenia64 | 98
-rw-r--r--  xen/arch/ia64/tools/README.xenoprof | 154
-rw-r--r--  xen/arch/ia64/tools/linux-xen-diffs | 25
-rw-r--r--  xen/arch/ia64/tools/p2m_expose/Makefile | 31
-rw-r--r--  xen/arch/ia64/tools/p2m_expose/README.p2m_expose | 12
-rw-r--r--  xen/arch/ia64/tools/p2m_expose/expose_p2m.c | 185
-rw-r--r--  xen/arch/ia64/tools/privify/Makefile | 9
-rw-r--r--  xen/arch/ia64/tools/privify/README.privify | 8
-rw-r--r--  xen/arch/ia64/tools/privify/privify.c | 360
-rw-r--r--  xen/arch/ia64/tools/privify/privify.h | 34
-rw-r--r--  xen/arch/ia64/tools/privify/privify_elf64.c | 120
-rw-r--r--  xen/arch/ia64/tools/privop/Makefile | 13
-rw-r--r--  xen/arch/ia64/tools/privop/pohcalls.S | 30
-rw-r--r--  xen/arch/ia64/tools/privop/postat.c | 27
-rwxr-xr-x  xen/arch/ia64/tools/sparse-merge | 152
-rwxr-xr-x  xen/arch/ia64/tools/xelilo/elilo.README | 20
-rwxr-xr-x  xen/arch/ia64/tools/xelilo/xlilo.efi | bin 373671 -> 0 bytes
-rw-r--r--  xen/arch/ia64/vmx/Makefile | 24
-rw-r--r--  xen/arch/ia64/vmx/mmio.c | 560
-rw-r--r--  xen/arch/ia64/vmx/optvfault.S | 1184
-rw-r--r--  xen/arch/ia64/vmx/pal_emul.c | 62
-rw-r--r--  xen/arch/ia64/vmx/save.c | 69
-rw-r--r--  xen/arch/ia64/vmx/sioemu.c | 243
-rw-r--r--  xen/arch/ia64/vmx/vacpi.c | 268
-rw-r--r--  xen/arch/ia64/vmx/viosapic.c | 408
-rw-r--r--  xen/arch/ia64/vmx/vlsapic.c | 961
-rw-r--r--  xen/arch/ia64/vmx/vmmu.c | 625
-rw-r--r--  xen/arch/ia64/vmx/vmx_entry.S | 761
-rw-r--r--  xen/arch/ia64/vmx/vmx_fault.c | 604
-rw-r--r--  xen/arch/ia64/vmx/vmx_hypercall.c | 233
-rw-r--r--  xen/arch/ia64/vmx/vmx_init.c | 661
-rw-r--r--  xen/arch/ia64/vmx/vmx_interrupt.c | 167
-rw-r--r--  xen/arch/ia64/vmx/vmx_ivt.S | 1364
-rw-r--r--  xen/arch/ia64/vmx/vmx_minstate.h | 306
-rw-r--r--  xen/arch/ia64/vmx/vmx_phy_mode.c | 344
-rw-r--r--  xen/arch/ia64/vmx/vmx_support.c | 88
-rw-r--r--  xen/arch/ia64/vmx/vmx_utility.c | 674
-rw-r--r--  xen/arch/ia64/vmx/vmx_vcpu.c | 587
-rw-r--r--  xen/arch/ia64/vmx/vmx_vcpu_save.c | 367
-rw-r--r--  xen/arch/ia64/vmx/vmx_virt.c | 1636
-rw-r--r--  xen/arch/ia64/vmx/vmx_vsa.S | 84
-rw-r--r--  xen/arch/ia64/vmx/vtlb.c | 764
-rw-r--r--  xen/arch/ia64/xen/Makefile | 44
-rw-r--r--  xen/arch/ia64/xen/cpufreq/Makefile | 1
-rw-r--r--  xen/arch/ia64/xen/cpufreq/cpufreq.c | 299
-rw-r--r--  xen/arch/ia64/xen/crash.c | 49
-rw-r--r--  xen/arch/ia64/xen/dom0_ops.c | 878
-rw-r--r--  xen/arch/ia64/xen/dom_fw_asm.S | 43
-rw-r--r--  xen/arch/ia64/xen/dom_fw_common.c | 706
-rw-r--r--  xen/arch/ia64/xen/dom_fw_dom0.c | 563
-rw-r--r--  xen/arch/ia64/xen/dom_fw_domu.c | 245
-rw-r--r--  xen/arch/ia64/xen/dom_fw_sn2.c | 92
-rw-r--r--  xen/arch/ia64/xen/dom_fw_utils.c | 361
-rw-r--r--  xen/arch/ia64/xen/domain.c | 2488
-rw-r--r--  xen/arch/ia64/xen/faults.c | 835
-rw-r--r--  xen/arch/ia64/xen/flushd.S | 65
-rw-r--r--  xen/arch/ia64/xen/flushtlb.c | 91
-rw-r--r--  xen/arch/ia64/xen/fw_emul.c | 1622
-rw-r--r--  xen/arch/ia64/xen/gdbstub.c | 819
-rw-r--r--  xen/arch/ia64/xen/hpsimserial.c | 24
-rw-r--r--  xen/arch/ia64/xen/hypercall.c | 825
-rw-r--r--  xen/arch/ia64/xen/hyperprivop.S | 2225
-rw-r--r--  xen/arch/ia64/xen/idle0_task.c | 29
-rw-r--r--  xen/arch/ia64/xen/irq.c | 633
-rw-r--r--  xen/arch/ia64/xen/ivt.S | 1435
-rw-r--r--  xen/arch/ia64/xen/machine_kexec.c | 171
-rw-r--r--  xen/arch/ia64/xen/mm.c | 3590
-rw-r--r--  xen/arch/ia64/xen/mm_init.c | 111
-rw-r--r--  xen/arch/ia64/xen/oprofile/Makefile | 1
-rw-r--r--  xen/arch/ia64/xen/oprofile/perfmon.c | 205
-rw-r--r--  xen/arch/ia64/xen/oprofile/xenoprof.c | 91
-rw-r--r--  xen/arch/ia64/xen/pcdp.c | 281
-rw-r--r--  xen/arch/ia64/xen/pci.c | 137
-rw-r--r--  xen/arch/ia64/xen/platform_hypercall.c | 84
-rw-r--r--  xen/arch/ia64/xen/privop.c | 889
-rw-r--r--  xen/arch/ia64/xen/privop_stat.c | 153
-rw-r--r--  xen/arch/ia64/xen/regionreg.c | 436
-rw-r--r--  xen/arch/ia64/xen/relocate_kernel.S | 206
-rw-r--r--  xen/arch/ia64/xen/sn_console.c | 154
-rw-r--r--  xen/arch/ia64/xen/tlb_track.c | 533
-rw-r--r--  xen/arch/ia64/xen/vcpu.c | 2320
-rw-r--r--  xen/arch/ia64/xen/vhpt.c | 585
-rw-r--r--  xen/arch/ia64/xen/xen.lds.S | 283
-rw-r--r--  xen/arch/ia64/xen/xenasm.S | 649
-rw-r--r--  xen/arch/ia64/xen/xenmem.c | 249
-rw-r--r--  xen/arch/ia64/xen/xenmisc.c | 142
-rw-r--r--  xen/arch/ia64/xen/xenpatch.c | 149
-rw-r--r--  xen/arch/ia64/xen/xensetup.c | 719
-rw-r--r--  xen/arch/ia64/xen/xentime.c | 263
-rw-r--r--  xen/common/Makefile | 1
-rw-r--r--  xen/common/grant_table.c | 2
-rw-r--r--  xen/common/kexec.c | 4
-rw-r--r--  xen/common/memory.c | 2
-rw-r--r--  xen/common/page_alloc.c | 2
-rw-r--r--  xen/common/tmem_xen.c | 2
-rw-r--r--  xen/drivers/cpufreq/cpufreq.c | 11
-rw-r--r--  xen/drivers/passthrough/Makefile | 1
-rw-r--r--  xen/drivers/passthrough/io.c | 7
-rw-r--r--  xen/drivers/passthrough/iommu.c | 19
-rw-r--r--  xen/drivers/passthrough/pci.c | 3
-rw-r--r--  xen/drivers/passthrough/vtd/Makefile | 1
-rw-r--r--  xen/drivers/passthrough/vtd/ia64/Makefile | 1
-rw-r--r--  xen/drivers/passthrough/vtd/ia64/vtd.c | 111
-rw-r--r--  xen/drivers/passthrough/vtd/intremap.c | 43
-rw-r--r--  xen/drivers/passthrough/vtd/iommu.c | 16
-rw-r--r--  xen/drivers/passthrough/vtd/utils.c | 3
-rw-r--r--  xen/drivers/passthrough/vtd/vtd.h | 38
-rw-r--r--  xen/include/Makefile | 5
-rw-r--r--  xen/include/asm-ia64/bug.h | 15
-rw-r--r--  xen/include/asm-ia64/bundle.h | 239
-rw-r--r--  xen/include/asm-ia64/config.h | 290
-rw-r--r--  xen/include/asm-ia64/debugger.h | 116
-rw-r--r--  xen/include/asm-ia64/dom_fw.h | 209
-rw-r--r--  xen/include/asm-ia64/dom_fw_common.h | 115
-rw-r--r--  xen/include/asm-ia64/dom_fw_dom0.h | 41
-rw-r--r--  xen/include/asm-ia64/dom_fw_domu.h | 49
-rw-r--r--  xen/include/asm-ia64/dom_fw_utils.h | 45
-rw-r--r--  xen/include/asm-ia64/domain.h | 367
-rw-r--r--  xen/include/asm-ia64/elf.h | 68
-rw-r--r--  xen/include/asm-ia64/event.h | 85
-rw-r--r--  xen/include/asm-ia64/flushtlb.h | 90
-rw-r--r--  xen/include/asm-ia64/grant_table.h | 112
-rw-r--r--  xen/include/asm-ia64/guest_access.h | 30
-rw-r--r--  xen/include/asm-ia64/hardirq.h | 11
-rw-r--r--  xen/include/asm-ia64/hvm/iommu.h | 35
-rw-r--r--  xen/include/asm-ia64/hvm/irq.h | 108
-rw-r--r--  xen/include/asm-ia64/hvm/support.h | 40
-rw-r--r--  xen/include/asm-ia64/hvm/vacpi.h | 42
-rw-r--r--  xen/include/asm-ia64/hvm/vlapic.h | 4
-rw-r--r--  xen/include/asm-ia64/hypercall.h | 25
-rw-r--r--  xen/include/asm-ia64/ia64_int.h | 56
-rw-r--r--  xen/include/asm-ia64/init.h | 4
-rw-r--r--  xen/include/asm-ia64/iocap.h | 23
-rw-r--r--  xen/include/asm-ia64/kexec.h | 20
-rw-r--r--  xen/include/asm-ia64/linux-null/README.origin | 3
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/cyclone.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/desc.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/ia32.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/mman.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/mmzone.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/module.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/nmi.h | 7
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/pdb.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/ptrace_offsets.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/scatterlist.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/semaphore.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/serial.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/signal.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/sn/arch.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/sn/geo.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/sn/nodepda.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/sn/sn_cpuid.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/ustack.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/asm/xen/hypervisor.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/bootmem.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/capability.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/completion.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/device.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/dmapool.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/file.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/kallsyms.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/kernel_stat.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/mmzone.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/module.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/mount.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/node.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/page-flags.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/pagemap.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/platform.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/pm.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/poll.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/proc_fs.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/profile.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/ptrace.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/random.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/rcupdate.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/rtc.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/rwsem.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/seq_file.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/serial.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/serial_core.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/signal.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/slab.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/smp_lock.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/swap.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/sysctl.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/threads.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/tty.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/vfs.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/vmalloc.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-null/linux/workqueue.h | 1
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm-generic/README.origin | 8
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm-generic/pgtable-nopud.h | 69
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/README.origin | 51
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/acpi.h | 169
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/atomic.h | 259
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/cache.h | 37
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h | 605
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/hw_irq.h | 139
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/ia64regs.h | 104
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/io.h | 490
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/iosapic.h | 198
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/irq.h | 80
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/kregs.h | 167
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/machvec.h | 573
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/machvec_dig.h | 46
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h | 66
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h | 173
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/mca_asm.h | 410
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/meminit.h | 73
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/numa.h | 98
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/page.h | 227
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/pal.h | 1760
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/pci.h | 185
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/percpu.h | 87
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/perfmon.h | 286
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/perfmon_default_smpl.h | 83
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/pgalloc.h | 221
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/pgtable.h | 694
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/processor.h | 795
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/ptrace.h | 386
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sal.h | 891
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sections.h | 28
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/smp.h | 147
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/README.origin | 17
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/addrs.h | 299
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/arch.h | 92
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h | 95
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/intr.h | 73
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/io.h | 281
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h | 87
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h | 153
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/pcidev.h | 87
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h | 32
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/sn/types.h | 28
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/spinlock.h | 100
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/system.h | 308
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/types.h | 87
-rw-r--r--  xen/include/asm-ia64/linux-xen/linux/README.origin | 23
-rw-r--r--  xen/include/asm-ia64/linux-xen/linux/completion.h | 61
-rw-r--r--  xen/include/asm-ia64/linux-xen/linux/device.h | 489
-rw-r--r--  xen/include/asm-ia64/linux-xen/linux/efi.h | 534
-rw-r--r--  xen/include/asm-ia64/linux-xen/linux/gfp.h | 148
-rw-r--r--  xen/include/asm-ia64/linux-xen/linux/hardirq.h | 116
-rw-r--r--  xen/include/asm-ia64/linux-xen/linux/interrupt.h | 307
-rw-r--r--  xen/include/asm-ia64/linux-xen/linux/kobject.h | 288
-rw-r--r--  xen/include/asm-ia64/linux-xen/linux/linux-pci.h | 836
-rw-r--r--  xen/include/asm-ia64/linux-xen/linux/oprofile.h | 119
-rw-r--r--  xen/include/asm-ia64/linux/README.origin | 35
-rw-r--r--  xen/include/asm-ia64/linux/asm-generic/README.origin | 15
-rw-r--r--  xen/include/asm-ia64/linux/asm-generic/div64.h | 58
-rw-r--r--  xen/include/asm-ia64/linux/asm-generic/ide_iops.h | 38
-rw-r--r--  xen/include/asm-ia64/linux/asm-generic/iomap.h | 68
-rw-r--r--  xen/include/asm-ia64/linux/asm-generic/pci.h | 42
-rw-r--r--  xen/include/asm-ia64/linux/asm-generic/pgtable.h | 214
-rw-r--r--  xen/include/asm-ia64/linux/asm-generic/sections.h | 16
-rw-r--r--  xen/include/asm-ia64/linux/asm-generic/topology.h | 55
-rw-r--r--  xen/include/asm-ia64/linux/asm-generic/unaligned.h | 122
-rw-r--r--  xen/include/asm-ia64/linux/asm-generic/vmlinux.lds.h | 90
-rw-r--r--  xen/include/asm-ia64/linux/asm/README.origin | 41
-rw-r--r--  xen/include/asm-ia64/linux/asm/asmmacro.h | 111
-rw-r--r--  xen/include/asm-ia64/linux/asm/bitops.h | 423
-rw-r--r--  xen/include/asm-ia64/linux/asm/break.h | 23
-rw-r--r--  xen/include/asm-ia64/linux/asm/byteorder.h | 42
-rw-r--r--  xen/include/asm-ia64/linux/asm/cacheflush.h | 50
-rw-r--r--  xen/include/asm-ia64/linux/asm/checksum.h | 76
-rw-r--r--  xen/include/asm-ia64/linux/asm/current.h | 17
-rw-r--r--  xen/include/asm-ia64/linux/asm/delay.h | 97
-rw-r--r--  xen/include/asm-ia64/linux/asm/div64.h | 1
-rw-r--r--  xen/include/asm-ia64/linux/asm/dma.h | 23
-rw-r--r--  xen/include/asm-ia64/linux/asm/fpswa.h | 73
-rw-r--r--  xen/include/asm-ia64/linux/asm/fpu.h | 66
-rw-r--r--  xen/include/asm-ia64/linux/asm/hdreg.h | 14
-rw-r--r--  xen/include/asm-ia64/linux/asm/intrinsics.h | 181
-rw-r--r--  xen/include/asm-ia64/linux/asm/ioctl.h | 77
-rw-r--r--  xen/include/asm-ia64/linux/asm/linkage.h | 6
-rw-r--r--  xen/include/asm-ia64/linux/asm/machvec_hpsim.h | 18
-rw-r--r--  xen/include/asm-ia64/linux/asm/machvec_init.h | 32
-rw-r--r--  xen/include/asm-ia64/linux/asm/mca.h | 132
-rw-r--r--  xen/include/asm-ia64/linux/asm/nodedata.h | 52
-rw-r--r--  xen/include/asm-ia64/linux/asm/numnodes.h | 15
-rw-r--r--  xen/include/asm-ia64/linux/asm/param.h | 42
-rw-r--r--  xen/include/asm-ia64/linux/asm/patch.h | 25
-rw-r--r--  xen/include/asm-ia64/linux/asm/rse.h | 66
-rw-r--r--  xen/include/asm-ia64/linux/asm/setup.h | 6
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/README.origin | 24
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/geo.h | 132
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/klconfig.h | 246
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/l1.h | 51
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/leds.h | 33
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/module.h | 127
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h | 68
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/pda.h | 69
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/pic.h | 261
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/shub_mmr.h | 502
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/shubio.h | 3358
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/simulator.h | 20
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h | 132
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/sn_feature_sets.h | 51
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/sn_sal.h | 1167
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/tiocp.h | 257
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/xbow.h | 301
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/xwidgetdev.h | 70
-rw-r--r--  xen/include/asm-ia64/linux/asm/string.h | 22
-rw-r--r--  xen/include/asm-ia64/linux/asm/thread_info.h | 94
-rw-r--r--  xen/include/asm-ia64/linux/asm/timex.h | 40
-rw-r--r--  xen/include/asm-ia64/linux/asm/topology.h | 128
-rw-r--r--  xen/include/asm-ia64/linux/asm/unaligned.h | 6
-rw-r--r--  xen/include/asm-ia64/linux/asm/unistd.h | 405
-rw-r--r--  xen/include/asm-ia64/linux/asm/unwind.h | 240
-rw-r--r--  xen/include/asm-ia64/linux/bcd.h | 20
-rw-r--r--  xen/include/asm-ia64/linux/bitmap.h | 261
-rw-r--r--  xen/include/asm-ia64/linux/bitops.h | 159
-rw-r--r--  xen/include/asm-ia64/linux/byteorder/README.origin | 9
-rw-r--r--  xen/include/asm-ia64/linux/byteorder/generic.h | 172
-rw-r--r--  xen/include/asm-ia64/linux/byteorder/little_endian.h | 106
-rw-r--r--  xen/include/asm-ia64/linux/byteorder/swab.h | 192
-rw-r--r--  xen/include/asm-ia64/linux/hash.h | 58
-rw-r--r--  xen/include/asm-ia64/linux/initrd.h | 20
-rw-r--r--  xen/include/asm-ia64/linux/ioport.h | 136
-rw-r--r--  xen/include/asm-ia64/linux/jiffies.h | 450
-rw-r--r--  xen/include/asm-ia64/linux/klist.h | 61
-rw-r--r--  xen/include/asm-ia64/linux/kmalloc_sizes.h | 33
-rw-r--r--  xen/include/asm-ia64/linux/kref.h | 32
-rw-r--r--  xen/include/asm-ia64/linux/linkage.h | 47
-rw-r--r--  xen/include/asm-ia64/linux/mod_devicetable.h | 323
-rw-r--r--  xen/include/asm-ia64/linux/notifier.h | 76
-rw-r--r--  xen/include/asm-ia64/linux/pci_ids.h | 2356
-rw-r--r--  xen/include/asm-ia64/linux/pci_regs.h | 488
-rw-r--r--  xen/include/asm-ia64/linux/percpu.h | 61
-rw-r--r--  xen/include/asm-ia64/linux/pm.h | 279
-rw-r--r--  xen/include/asm-ia64/linux/preempt.h | 62
-rw-r--r--  xen/include/asm-ia64/linux/seqlock.h | 175
-rw-r--r--  xen/include/asm-ia64/linux/stddef.h | 20
-rw-r--r--  xen/include/asm-ia64/linux/sysfs.h | 206
-rw-r--r--  xen/include/asm-ia64/linux/thread_info.h | 92
-rw-r--r--  xen/include/asm-ia64/linux/time.h | 181
-rw-r--r--  xen/include/asm-ia64/linux/timex.h | 320
-rw-r--r--  xen/include/asm-ia64/linux/topology.h | 144
-rw-r--r--  xen/include/asm-ia64/linux/wait.h | 458
-rw-r--r--  xen/include/asm-ia64/mach_apic.h | 1
-rw-r--r--  xen/include/asm-ia64/mm.h | 586
-rw-r--r--  xen/include/asm-ia64/mmu_context.h | 20
-rw-r--r--  xen/include/asm-ia64/msi.h | 30
-rw-r--r--  xen/include/asm-ia64/multicall.h | 31
-rw-r--r--  xen/include/asm-ia64/offsets.h | 9
-rw-r--r--  xen/include/asm-ia64/p2m_entry.h | 78
-rw-r--r--  xen/include/asm-ia64/perfc.h | 18
-rw-r--r--  xen/include/asm-ia64/perfc_defn.h | 176
-rw-r--r--  xen/include/asm-ia64/privop.h | 13
-rw-r--r--  xen/include/asm-ia64/privop_stat.h | 41
-rw-r--r--  xen/include/asm-ia64/regionreg.h | 111
-rw-r--r--  xen/include/asm-ia64/regs.h | 1
-rw-r--r--  xen/include/asm-ia64/shadow.h | 77
-rw-r--r--  xen/include/asm-ia64/shared.h | 4
-rw-r--r--  xen/include/asm-ia64/sioemu.h | 29
-rw-r--r--  xen/include/asm-ia64/slab.h | 3
-rw-r--r--  xen/include/asm-ia64/softirq.h | 9
-rw-r--r--  xen/include/asm-ia64/time.h | 12
-rw-r--r--  xen/include/asm-ia64/tlb.h | 39
-rw-r--r--  xen/include/asm-ia64/tlb_track.h | 155
-rw-r--r--  xen/include/asm-ia64/tlbflush.h | 49
-rw-r--r--  xen/include/asm-ia64/trace.h | 4
-rw-r--r--  xen/include/asm-ia64/uaccess.h | 296
-rw-r--r--  xen/include/asm-ia64/vcpu.h | 428
-rw-r--r--  xen/include/asm-ia64/vcpumask.h | 60
-rw-r--r--  xen/include/asm-ia64/vhpt.h | 107
-rw-r--r--  xen/include/asm-ia64/viosapic.h | 76
-rw-r--r--  xen/include/asm-ia64/virt_event.h | 114
-rw-r--r--  xen/include/asm-ia64/vlsapic.h | 78
-rw-r--r--  xen/include/asm-ia64/vmmu.h | 223
-rw-r--r--  xen/include/asm-ia64/vmx.h | 60
-rw-r--r--  xen/include/asm-ia64/vmx_mm_def.h | 159
-rw-r--r--  xen/include/asm-ia64/vmx_pal.h | 122
-rw-r--r--  xen/include/asm-ia64/vmx_pal_vsa.h | 53
-rw-r--r--  xen/include/asm-ia64/vmx_phy_mode.h | 101
-rw-r--r--  xen/include/asm-ia64/vmx_platform.h | 59
-rw-r--r--  xen/include/asm-ia64/vmx_vcpu.h | 725
-rw-r--r--  xen/include/asm-ia64/vmx_vcpu_save.h | 40
-rw-r--r--  xen/include/asm-ia64/vmx_vpd.h | 126
-rw-r--r--  xen/include/asm-ia64/vtm.h | 67
-rw-r--r--  xen/include/asm-ia64/xengcc_intrin.h | 59
-rw-r--r--  xen/include/asm-ia64/xenia64regs.h | 31
-rw-r--r--  xen/include/asm-ia64/xenkregs.h | 98
-rw-r--r--  xen/include/asm-ia64/xenmca.h | 34
-rw-r--r--  xen/include/asm-ia64/xenoprof.h | 87
-rw-r--r--  xen/include/asm-ia64/xenpage.h | 89
-rw-r--r--  xen/include/asm-ia64/xenprocessor.h | 253
-rw-r--r--  xen/include/asm-ia64/xensystem.h | 43
-rw-r--r--  xen/include/asm-ia64/xentypes.h | 17
-rw-r--r--  xen/include/asm-x86/hvm/irq.h | 7
-rw-r--r--  xen/include/asm-x86/hvm/vioapic.h | 2
-rw-r--r--  xen/include/xen/cpumask.h | 33
-rw-r--r--  xen/include/xen/efi.h | 12
-rw-r--r--  xen/include/xen/elfcore.h | 3
-rw-r--r--  xen/include/xen/hvm/irq.h | 2
-rw-r--r--  xen/include/xen/iommu.h | 4
-rw-r--r--  xen/include/xen/irq.h | 22
-rw-r--r--  xen/include/xen/libelf.h | 2
-rw-r--r--  xen/include/xen/symbols.h | 10
488 files changed, 19 insertions, 117304 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index c1d8696e7b..a0c066a521 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -138,14 +138,6 @@ M: Tim Deegan <tim@xen.org>
S: Supported
F: tools/debugger/kdd/
-IA64 ARCHITECTURE
-M: KUWAMURA Shin'ya <kuwa@jp.fujitsu.com>
-S: Supported
-L: xen-ia64-devel@lists.xensource.com
-F: xen/arch/ia64/*
-F: xen/include/asm-ia64/*
-F: tools/libxc/ia64/*
-
INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
M: Joseph Cihula <joseph.cihula@intel.com>
M: Gang Wei <gang.wei@intel.com>
diff --git a/config/ia64.mk b/config/ia64.mk
deleted file mode 100644
index 50edf6313d..0000000000
--- a/config/ia64.mk
+++ /dev/null
@@ -1,6 +0,0 @@
-CONFIG_IA64 := y
-CONFIG_IA64_$(XEN_OS) := y
-
-CONFIG_IOEMU := y
-CONFIG_XCUTILS := y
-CONFIG_XENCOMM := y
diff --git a/xen/arch/ia64/Makefile b/xen/arch/ia64/Makefile
deleted file mode 100644
index 324b65a7f0..0000000000
--- a/xen/arch/ia64/Makefile
+++ /dev/null
@@ -1,71 +0,0 @@
-subdir-y += xen
-subdir-y += vmx
-subdir-y += linux
-subdir-y += linux-xen
-
-ALL_OBJS := linux-xen/head.o $(ALL_OBJS)
-
-$(TARGET)-syms: $(ALL_OBJS) xen.lds.s
- $(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/common/symbols-dummy.o
- $(LD) $(LDFLAGS) -T xen.lds.s -N -Map $(@D)/.$(@F).0.map $(ALL_OBJS) \
- $(BASEDIR)/common/symbols-dummy.o -o $(@D)/.$(@F).0
- $(NM) -n $(@D)/.$(@F).0 | $(BASEDIR)/tools/symbols >$(@D)/.$(@F).0.S
- $(MAKE) -f $(BASEDIR)/Rules.mk $(@D)/.$(@F).0.o
- $(LD) $(LDFLAGS) -T xen.lds.s -N -Map $(@D)/.$(@F).1.map $(ALL_OBJS) \
- $(@D)/.$(@F).0.o -o $(@D)/.$(@F).1
- $(NM) -n $(@D)/.$(@F).1 | $(BASEDIR)/tools/symbols >$(@D)/.$(@F).1.S
- $(MAKE) -f $(BASEDIR)/Rules.mk $(@D)/.$(@F).1.o
- $(LD) $(LDFLAGS) -T xen.lds.s -N -Map $@.map $(ALL_OBJS) \
- $(@D)/.$(@F).1.o -o $@
- rm -f $(@D)/.$(@F).[0-9]*
-
-$(TARGET): $(TARGET)-syms
- $(NM) -n $< | grep -v ' [aUw] ' > $(@D)/System.map
- $(OBJCOPY) -R .note -R .comment -S $< $@
-
-# Headers do not depend on auto-generated header, but object files do.
-$(ALL_OBJS): $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h
-
-asm-offsets.s: asm-offsets.c \
- $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
- $(CC) $(CFLAGS) -DGENERATE_ASM_OFFSETS -DIA64_TASK_SIZE=0 -S -o $@ $<
-
-asm-xsi-offsets.s: asm-xsi-offsets.c
- $(CC) $(CFLAGS) -S -o $@ $<
-
-$(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h: asm-xsi-offsets.s
- @(set -e; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was auto-generated from $<"; \
- echo " *"; \
- echo " */"; \
- echo ""; \
- echo "#ifndef __ASM_XSI_OFFSETS_H__"; \
- echo "#define __ASM_XSI_OFFSETS_H__"; \
- echo ""; \
- sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \
- echo ""; \
- echo "#endif") <$< >$@
-
-$(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
-# Need such symbol link to make linux headers available
- [ -e $(BASEDIR)/include/linux ] \
- || ln -sf $(BASEDIR)/include/xen $(BASEDIR)/include/linux
- [ -e $(BASEDIR)/include/asm-ia64/xen ] \
- || ln -sf $(BASEDIR)/include/asm-ia64/linux $(BASEDIR)/include/asm-ia64/xen
- touch $@
-
-# I'm sure a Makefile wizard would know a better way to do this
-xen.lds.s: xen/xen.lds.S
- $(CC) -E $(CPPFLAGS) -P -DXEN $(AFLAGS) \
- -o xen.lds.s xen/xen.lds.S
-
-.PHONY: clean
-clean::
- rm -f *.o *~ core xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp asm-offsets.s map.out
- rm -f asm-xsi-offsets.s $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h
- rm -f $(BASEDIR)/System.map
- rm -f $(BASEDIR)/include/linux
- rm -f $(BASEDIR)/include/asm-ia64/xen
diff --git a/xen/arch/ia64/Rules.mk b/xen/arch/ia64/Rules.mk
deleted file mode 100644
index 054b4def94..0000000000
--- a/xen/arch/ia64/Rules.mk
+++ /dev/null
@@ -1,93 +0,0 @@
-########################################
-# ia64-specific definitions
-
-ia64 := y
-HAS_ACPI := y
-HAS_VGA := y
-HAS_CPUFREQ := y
-HAS_PCI := y
-HAS_PASSTHROUGH := y
-HAS_NS16550 := y
-HAS_KEXEC := y
-xenoprof := y
-no_warns ?= n
-vti_debug ?= n
-vmx_panic ?= n
-vhpt_disable ?= n
-xen_ia64_expose_p2m ?= y
-xen_ia64_pervcpu_vhpt ?= y
-xen_ia64_tlb_track ?= y
-xen_ia64_tlb_track_cnt ?= n
-xen_ia64_tlbflush_clock ?= y
-xen_ia64_disable_optvfault ?= n
-
-# If they are enabled,
-# shrink struct page_info assuming all mfn can be addressed by 32 bits.
-# However, with 50bit ia64 architected physical address and 16KB page size,
-# mfn isn't always assessed by 32bit. So they are disabled by default.
-xen_ia64_shrink_page_list ?= n
-xen_ia64_pickle_domain ?= n
-
-# Used only by linux/Makefile.
-AFLAGS_KERNEL += -mconstant-gp -nostdinc $(CPPFLAGS)
-
-CFLAGS += -nostdinc -fno-builtin -fno-common
-CFLAGS += -mconstant-gp
-#CFLAGS += -O3 # -O3 over-inlines making debugging tough!
-CFLAGS += -O2 # but no optimization causes compile errors!
-CFLAGS += -fomit-frame-pointer -D__KERNEL__
-CFLAGS += -iwithprefix include
-CPPFLAGS+= -I$(BASEDIR)/include \
- -I$(BASEDIR)/include/asm-ia64 \
- -I$(BASEDIR)/include/asm-ia64/linux \
- -I$(BASEDIR)/include/asm-ia64/linux-xen \
- -I$(BASEDIR)/include/asm-ia64/linux-null \
- -I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64/linux-xen
-CFLAGS += $(CPPFLAGS)
-#CFLAGS += -Wno-pointer-arith -Wredundant-decls
-CFLAGS += -DIA64 -DXEN -DLINUX_2_6
-CFLAGS += -ffixed-r13 -mfixed-range=f2-f5,f12-f127,b2-b5
-CFLAGS += -g
-ifeq ($(vti_debug),y)
-CFLAGS += -DVTI_DEBUG
-endif
-ifeq ($(vmx_panic),y)
-CFLAGS += -DCONFIG_VMX_PANIC
-endif
-ifeq ($(xen_ia64_expose_p2m),y)
-CFLAGS += -DCONFIG_XEN_IA64_EXPOSE_P2M
-endif
-ifeq ($(xen_ia64_pervcpu_vhpt),y)
-CFLAGS += -DCONFIG_XEN_IA64_PERVCPU_VHPT
-ifeq ($(vhpt_disable),y)
-$(error "both xen_ia64_pervcpu_vhpt=y and vhpt_disable=y are enabled. they can't be enabled simultaneously. disable one of them.")
-endif
-endif
-ifeq ($(xen_ia64_tlb_track),y)
-CFLAGS += -DCONFIG_XEN_IA64_TLB_TRACK
-endif
-ifeq ($(xen_ia64_tlb_track_cnt),y)
-CFLAGS += -DCONFIG_TLB_TRACK_CNT
-endif
-ifeq ($(xen_ia64_tlbflush_clock),y)
-CFLAGS += -DCONFIG_XEN_IA64_TLBFLUSH_CLOCK
-endif
-ifeq ($(no_warns),y)
-CFLAGS += -Wa,--fatal-warnings -Werror -Wno-uninitialized
-endif
-ifneq ($(vhpt_disable),y)
-CFLAGS += -DVHPT_ENABLED=1
-else
-CFLAGS += -DVHPT_ENABLED=0
-endif
-ifeq ($(xen_ia64_disable_optvfault),y)
-CFLAGS += -DCONFIG_XEN_IA64_DISABLE_OPTVFAULT
-endif
-ifeq ($(xen_ia64_shrink_page_list),y)
-CFLAGS += -DCONFIG_IA64_SHRINK_PAGE_LIST
-endif
-ifeq ($(xen_ia64_pickle_domain),y)
-CFLAGS += -DCONFIG_IA64_PICKLE_DOMAIN
-endif
-
-LDFLAGS = -g
diff --git a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
deleted file mode 100644
index e9e0ba5099..0000000000
--- a/xen/arch/ia64/asm-offsets.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- */
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/mca.h>
-#include <public/xen.h>
-#include <asm/tlb.h>
-#include <asm/regs.h>
-#include <asm/xenmca.h>
-
-#define task_struct vcpu
-
-#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(_sym, _str, _mem) \
- DEFINE(_sym, offsetof(_str, _mem));
-
-void foo(void)
-{
- DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct));
- DEFINE(IA64_THREAD_INFO_SIZE, sizeof (struct thread_info));
- DEFINE(IA64_PT_REGS_SIZE, sizeof (struct pt_regs));
- DEFINE(IA64_SWITCH_STACK_SIZE, sizeof (struct switch_stack));
- DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
- DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
- DEFINE(MAPPED_REGS_T_SIZE, sizeof (mapped_regs_t));
-
- BLANK();
- DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, offsetof (struct ia64_mca_cpu, init_stack));
-
- BLANK();
- DEFINE(VCPU_VTM_OFFSET_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.vtm_offset));
- DEFINE(VCPU_VTM_LAST_ITC_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.last_itc));
- DEFINE(VCPU_VRR0_OFS, offsetof(struct vcpu, arch.arch_vmx.vrr[0]));
- DEFINE(VCPU_ITR0_OFS, offsetof(struct vcpu, arch.itrs[0]));
- DEFINE(VCPU_CALLBACK_OFS, offsetof(struct vcpu, arch.event_callback_ip));
-#ifdef VTI_DEBUG
- DEFINE(IVT_CUR_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_current));
- DEFINE(IVT_DBG_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_debug));
- DEFINE(IVT_DEBUG_SIZE, sizeof(struct ivt_debug));
-#endif
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
-
- BLANK();
-
- DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct vcpu, arch._thread.ksp));
- DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
-
- DEFINE(IA64_VCPU_HYPERCALL_CONTINUATION_OFS, offsetof (struct vcpu, arch.hypercall_continuation));
- DEFINE(IA64_VCPU_FP_PSR_OFFSET, offsetof (struct vcpu, arch.fp_psr));
- DEFINE(IA64_VCPU_META_RID_DT_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_dt));
- DEFINE(IA64_VCPU_META_RID_D_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_d));
- DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
- DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
- DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
- DEFINE(IA64_VCPU_EVENT_CALLBACK_IP_OFFSET, offsetof (struct vcpu, arch.event_callback_ip));
- DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
- DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
- DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
- DEFINE(IA64_VCPU_STARTING_RID_OFFSET, offsetof (struct vcpu, arch.starting_rid));
- DEFINE(IA64_VCPU_ENDING_RID_OFFSET, offsetof (struct vcpu, arch.ending_rid));
- DEFINE(IA64_VCPU_RID_BITS_OFFSET, offsetof (struct vcpu, arch.rid_bits));
- DEFINE(IA64_VCPU_DOMAIN_ITM_OFFSET, offsetof (struct vcpu, arch.domain_itm));
- DEFINE(IA64_VCPU_DOMAIN_ITM_LAST_OFFSET, offsetof (struct vcpu, arch.domain_itm_last));
- DEFINE(IA64_VCPU_ITLB_OFFSET, offsetof (struct vcpu, arch.itlb));
- DEFINE(IA64_VCPU_DTLB_OFFSET, offsetof (struct vcpu, arch.dtlb));
- DEFINE(IA64_VCPU_VHPT_PG_SHIFT_OFFSET, offsetof (struct vcpu, arch.vhpt_pg_shift));
-
- BLANK();
-
- DEFINE(IA64_VCPU_SHADOW_BITMAP_OFFSET, offsetof (struct vcpu, arch.shadow_bitmap));
-
- BLANK();
-
- DEFINE(IA64_CPUINFO_ITM_NEXT_OFFSET, offsetof (struct cpuinfo_ia64, itm_next));
- DEFINE(IA64_CPUINFO_KSOFTIRQD_OFFSET, offsetof (struct cpuinfo_ia64, ksoftirqd));
-
-
- BLANK();
-
- DEFINE(IA64_PT_REGS_B6_OFFSET, offsetof (struct pt_regs, b6));
- DEFINE(IA64_PT_REGS_B7_OFFSET, offsetof (struct pt_regs, b7));
- DEFINE(IA64_PT_REGS_AR_CSD_OFFSET, offsetof (struct pt_regs, ar_csd));
- DEFINE(IA64_PT_REGS_AR_SSD_OFFSET, offsetof (struct pt_regs, ar_ssd));
- DEFINE(IA64_PT_REGS_R8_OFFSET, offsetof (struct pt_regs, r8));
- DEFINE(IA64_PT_REGS_R9_OFFSET, offsetof (struct pt_regs, r9));
- DEFINE(IA64_PT_REGS_R10_OFFSET, offsetof (struct pt_regs, r10));
- DEFINE(IA64_PT_REGS_R11_OFFSET, offsetof (struct pt_regs, r11));
- DEFINE(IA64_PT_REGS_CR_IPSR_OFFSET, offsetof (struct pt_regs, cr_ipsr));
- DEFINE(IA64_PT_REGS_CR_IIP_OFFSET, offsetof (struct pt_regs, cr_iip));
- DEFINE(IA64_PT_REGS_CR_IFS_OFFSET, offsetof (struct pt_regs, cr_ifs));
- DEFINE(IA64_PT_REGS_AR_UNAT_OFFSET, offsetof (struct pt_regs, ar_unat));
- DEFINE(IA64_PT_REGS_AR_PFS_OFFSET, offsetof (struct pt_regs, ar_pfs));
- DEFINE(IA64_PT_REGS_AR_RSC_OFFSET, offsetof (struct pt_regs, ar_rsc));
- DEFINE(IA64_PT_REGS_AR_RNAT_OFFSET, offsetof (struct pt_regs, ar_rnat));
-
- DEFINE(IA64_PT_REGS_AR_BSPSTORE_OFFSET, offsetof (struct pt_regs, ar_bspstore));
- DEFINE(IA64_PT_REGS_PR_OFFSET, offsetof (struct pt_regs, pr));
- DEFINE(IA64_PT_REGS_B0_OFFSET, offsetof (struct pt_regs, b0));
- DEFINE(IA64_PT_REGS_LOADRS_OFFSET, offsetof (struct pt_regs, loadrs));
- DEFINE(IA64_PT_REGS_R1_OFFSET, offsetof (struct pt_regs, r1));
- DEFINE(IA64_PT_REGS_R12_OFFSET, offsetof (struct pt_regs, r12));
- DEFINE(IA64_PT_REGS_R13_OFFSET, offsetof (struct pt_regs, r13));
- DEFINE(IA64_PT_REGS_AR_FPSR_OFFSET, offsetof (struct pt_regs, ar_fpsr));
- DEFINE(IA64_PT_REGS_R15_OFFSET, offsetof (struct pt_regs, r15));
- DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
- DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
- DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
- DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
- DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
- DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
- DEFINE(IA64_PT_REGS_R19_OFFSET, offsetof (struct pt_regs, r19));
- DEFINE(IA64_PT_REGS_R20_OFFSET, offsetof (struct pt_regs, r20));
- DEFINE(IA64_PT_REGS_R21_OFFSET, offsetof (struct pt_regs, r21));
- DEFINE(IA64_PT_REGS_R22_OFFSET, offsetof (struct pt_regs, r22));
- DEFINE(IA64_PT_REGS_R23_OFFSET, offsetof (struct pt_regs, r23));
- DEFINE(IA64_PT_REGS_R24_OFFSET, offsetof (struct pt_regs, r24));
- DEFINE(IA64_PT_REGS_R25_OFFSET, offsetof (struct pt_regs, r25));
- DEFINE(IA64_PT_REGS_R26_OFFSET, offsetof (struct pt_regs, r26));
- DEFINE(IA64_PT_REGS_R27_OFFSET, offsetof (struct pt_regs, r27));
- DEFINE(IA64_PT_REGS_R28_OFFSET, offsetof (struct pt_regs, r28));
- DEFINE(IA64_PT_REGS_R29_OFFSET, offsetof (struct pt_regs, r29));
- DEFINE(IA64_PT_REGS_R30_OFFSET, offsetof (struct pt_regs, r30));
- DEFINE(IA64_PT_REGS_R31_OFFSET, offsetof (struct pt_regs, r31));
- DEFINE(IA64_PT_REGS_AR_CCV_OFFSET, offsetof (struct pt_regs, ar_ccv));
- DEFINE(IA64_PT_REGS_F6_OFFSET, offsetof (struct pt_regs, f6));
- DEFINE(IA64_PT_REGS_F7_OFFSET, offsetof (struct pt_regs, f7));
- DEFINE(IA64_PT_REGS_F8_OFFSET, offsetof (struct pt_regs, f8));
- DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
- DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
- DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));
- DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct pt_regs, r4));
- DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct pt_regs, r5));
- DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
- DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
- DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
- DEFINE(IA64_VCPU_IIPA_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_iipa));
- DEFINE(IA64_VCPU_ISR_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_isr));
- DEFINE(IA64_VCPU_CAUSE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cause));
- DEFINE(IA64_VCPU_OPCODE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.opcode));
- DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
- DEFINE(IA64_PT_REGS_R16_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
- DEFINE(IA64_PT_REGS_R2_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
- DEFINE(IA64_PT_REGS_R8_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
- DEFINE(IA64_VCPU_FLAGS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.flags));
- DEFINE(IA64_VCPU_MMU_MODE_OFFSET,offsetof(struct vcpu, arch.arch_vmx.mmu_mode));
-
- BLANK();
-
- DEFINE(IA64_SWITCH_STACK_CALLER_UNAT_OFFSET, offsetof (struct switch_stack, caller_unat));
- DEFINE(IA64_SWITCH_STACK_AR_FPSR_OFFSET, offsetof (struct switch_stack, ar_fpsr));
- DEFINE(IA64_SWITCH_STACK_F2_OFFSET, offsetof (struct switch_stack, f2));
- DEFINE(IA64_SWITCH_STACK_F3_OFFSET, offsetof (struct switch_stack, f3));
- DEFINE(IA64_SWITCH_STACK_F4_OFFSET, offsetof (struct switch_stack, f4));
- DEFINE(IA64_SWITCH_STACK_F5_OFFSET, offsetof (struct switch_stack, f5));
- DEFINE(IA64_SWITCH_STACK_F12_OFFSET, offsetof (struct switch_stack, f12));
- DEFINE(IA64_SWITCH_STACK_F13_OFFSET, offsetof (struct switch_stack, f13));
- DEFINE(IA64_SWITCH_STACK_F14_OFFSET, offsetof (struct switch_stack, f14));
- DEFINE(IA64_SWITCH_STACK_F15_OFFSET, offsetof (struct switch_stack, f15));
- DEFINE(IA64_SWITCH_STACK_F16_OFFSET, offsetof (struct switch_stack, f16));
- DEFINE(IA64_SWITCH_STACK_F17_OFFSET, offsetof (struct switch_stack, f17));
- DEFINE(IA64_SWITCH_STACK_F18_OFFSET, offsetof (struct switch_stack, f18));
- DEFINE(IA64_SWITCH_STACK_F19_OFFSET, offsetof (struct switch_stack, f19));
- DEFINE(IA64_SWITCH_STACK_F20_OFFSET, offsetof (struct switch_stack, f20));
- DEFINE(IA64_SWITCH_STACK_F21_OFFSET, offsetof (struct switch_stack, f21));
- DEFINE(IA64_SWITCH_STACK_F22_OFFSET, offsetof (struct switch_stack, f22));
- DEFINE(IA64_SWITCH_STACK_F23_OFFSET, offsetof (struct switch_stack, f23));
- DEFINE(IA64_SWITCH_STACK_F24_OFFSET, offsetof (struct switch_stack, f24));
- DEFINE(IA64_SWITCH_STACK_F25_OFFSET, offsetof (struct switch_stack, f25));
- DEFINE(IA64_SWITCH_STACK_F26_OFFSET, offsetof (struct switch_stack, f26));
- DEFINE(IA64_SWITCH_STACK_F27_OFFSET, offsetof (struct switch_stack, f27));
- DEFINE(IA64_SWITCH_STACK_F28_OFFSET, offsetof (struct switch_stack, f28));
- DEFINE(IA64_SWITCH_STACK_F29_OFFSET, offsetof (struct switch_stack, f29));
- DEFINE(IA64_SWITCH_STACK_F30_OFFSET, offsetof (struct switch_stack, f30));
- DEFINE(IA64_SWITCH_STACK_F31_OFFSET, offsetof (struct switch_stack, f31));
- DEFINE(IA64_SWITCH_STACK_R4_OFFSET, offsetof (struct switch_stack, r4));
- DEFINE(IA64_SWITCH_STACK_R5_OFFSET, offsetof (struct switch_stack, r5));
- DEFINE(IA64_SWITCH_STACK_R6_OFFSET, offsetof (struct switch_stack, r6));
- DEFINE(IA64_SWITCH_STACK_R7_OFFSET, offsetof (struct switch_stack, r7));
- DEFINE(IA64_SWITCH_STACK_B0_OFFSET, offsetof (struct switch_stack, b0));
- DEFINE(IA64_SWITCH_STACK_B1_OFFSET, offsetof (struct switch_stack, b1));
- DEFINE(IA64_SWITCH_STACK_B2_OFFSET, offsetof (struct switch_stack, b2));
- DEFINE(IA64_SWITCH_STACK_B3_OFFSET, offsetof (struct switch_stack, b3));
- DEFINE(IA64_SWITCH_STACK_B4_OFFSET, offsetof (struct switch_stack, b4));
- DEFINE(IA64_SWITCH_STACK_B5_OFFSET, offsetof (struct switch_stack, b5));
- DEFINE(IA64_SWITCH_STACK_AR_PFS_OFFSET, offsetof (struct switch_stack, ar_pfs));
- DEFINE(IA64_SWITCH_STACK_AR_LC_OFFSET, offsetof (struct switch_stack, ar_lc));
- DEFINE(IA64_SWITCH_STACK_AR_UNAT_OFFSET, offsetof (struct switch_stack, ar_unat));
- DEFINE(IA64_SWITCH_STACK_AR_RNAT_OFFSET, offsetof (struct switch_stack, ar_rnat));
- DEFINE(IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET, offsetof (struct switch_stack, ar_bspstore));
- DEFINE(IA64_SWITCH_STACK_PR_OFFSET, offsetof (struct switch_stack, pr));
-
- BLANK();
-
- DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs));
- DEFINE(IA64_VPD_VIFS_OFFSET, offsetof (mapped_regs_t, ifs));
- DEFINE(IA64_VPD_VHPI_OFFSET, offsetof (mapped_regs_t, vhpi));
- DEFINE(IA64_VPD_VB1REG_OFFSET, offsetof (mapped_regs_t, bank1_regs[0]));
- DEFINE(IA64_VPD_VB0REG_OFFSET, offsetof (mapped_regs_t, bank0_regs[0]));
- DEFINE(IA64_VPD_VB1NAT_OFFSET, offsetof (mapped_regs_t, vnat));
- DEFINE(IA64_VPD_VB0NAT_OFFSET, offsetof (mapped_regs_t, vbnat));
- DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
- DEFINE(IA64_VPD_VPTA_OFFSET, offsetof (struct mapped_regs, pta));
- DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
-
- BLANK();
- DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
- DEFINE(IA64_CPUINFO_PTCE_BASE_OFFSET, offsetof (struct cpuinfo_ia64, ptce_base));
- DEFINE(IA64_CPUINFO_PTCE_COUNT_OFFSET, offsetof (struct cpuinfo_ia64, ptce_count));
- DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
-
-
- DEFINE(CLONE_IDLETASK_BIT, 12);
- DEFINE(CLONE_SETTLS_BIT, 19);
- DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
-
- BLANK();
- DEFINE(IA64_KR_CURRENT_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_CURRENT]));
- DEFINE(IA64_KR_PT_BASE_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_PT_BASE]));
- DEFINE(IA64_KR_IO_BASE_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_IO_BASE]));
- DEFINE(IA64_KR_PERCPU_DATA_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_PER_CPU_DATA]));
- DEFINE(IA64_KR_IO_BASE_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_IO_BASE]));
- DEFINE(IA64_KR_CURRENT_STACK_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_CURRENT_STACK]));
-
-#ifdef PERF_COUNTERS
- BLANK();
- DEFINE(IA64_PERFC_recover_to_page_fault, PERFC_recover_to_page_fault);
- DEFINE(IA64_PERFC_recover_to_break_fault, PERFC_recover_to_break_fault);
- DEFINE(IA64_PERFC_fast_vhpt_translate, PERFC_fast_vhpt_translate);
- DEFINE(IA64_PERFC_fast_hyperprivop, PERFC_fast_hyperprivop);
- DEFINE(IA64_PERFC_fast_reflect, PERFC_fast_reflect);
-#endif
-
- BLANK();
- DEFINE(IA64_CPUINFO_PTCE_BASE_OFFSET,
- offsetof(struct cpuinfo_ia64, ptce_base));
- DEFINE(IA64_CPUINFO_PTCE_COUNT_OFFSET,
- offsetof(struct cpuinfo_ia64, ptce_count));
- DEFINE(IA64_CPUINFO_PTCE_STRIDE_OFFSET,
- offsetof(struct cpuinfo_ia64, ptce_stride));
-
- BLANK();
- DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
- offsetof(struct ia64_mca_cpu, proc_state_dump));
- DEFINE(IA64_MCA_CPU_STACK_OFFSET,
- offsetof(struct ia64_mca_cpu, stack));
- DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
- offsetof(struct ia64_mca_cpu, stackframe));
- DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
- offsetof(struct ia64_mca_cpu, rbstore));
-
-#if VHPT_ENABLED
- DEFINE(IA64_VCPU_VHPT_PAGE_OFFSET,
- offsetof(struct vcpu, arch.vhpt_page));
- DEFINE(IA64_VCPU_VHPT_MADDR_OFFSET,
- offsetof(struct vcpu, arch.vhpt_maddr));
-#endif
-
- BLANK();
- DEFINE(IA64_MCA_TLB_INFO_SIZE, sizeof(struct ia64_mca_tlb_info));
- DEFINE(IA64_MCA_PERCPU_OFFSET,
- offsetof(struct ia64_mca_tlb_info, percpu_paddr));
-}
diff --git a/xen/arch/ia64/asm-xsi-offsets.c b/xen/arch/ia64/asm-xsi-offsets.c
deleted file mode 100755
index 4e84d41077..0000000000
--- a/xen/arch/ia64/asm-xsi-offsets.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * asm-xsi-offsets.c_
- * Copyright (c) 2005, Intel Corporation.
- * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
- * Eddie Dong <eddie.dong@intel.com>
- * Fred Yang <fred.yang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- */
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <public/xen.h>
-#include <asm/tlb.h>
-#include <asm/regs.h>
-
-#define task_struct vcpu
-
-#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define DEFINE_MAPPED_REG_OFS(sym, field) \
- DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(mapped_regs_t, field)))
-
-void foo(void)
-{
- DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
- DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
- DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
- DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
- DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
- DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
- DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
- DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
- DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
- DEFINE_MAPPED_REG_OFS(XSI_TPR_OFS, tpr);
- DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
- DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
- DEFINE_MAPPED_REG_OFS(XSI_ITV_OFS, itv);
- DEFINE_MAPPED_REG_OFS(XSI_PTA_OFS, pta);
- DEFINE_MAPPED_REG_OFS(XSI_VPSR_DFH_OFS, vpsr_dfh);
- DEFINE_MAPPED_REG_OFS(XSI_HPSR_DFH_OFS, hpsr_dfh);
- DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
- DEFINE_MAPPED_REG_OFS(XSI_VPSR_PP_OFS, vpsr_pp);
- DEFINE_MAPPED_REG_OFS(XSI_METAPHYS_OFS, metaphysical_mode);
- DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
- DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
- DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
- DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
- DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
- DEFINE_MAPPED_REG_OFS(XSI_RR0_OFS, rrs[0]);
- DEFINE_MAPPED_REG_OFS(XSI_KR0_OFS, krs[0]);
-}
diff --git a/xen/arch/ia64/linux-xen/Makefile b/xen/arch/ia64/linux-xen/Makefile
deleted file mode 100644
index b2f207db84..0000000000
--- a/xen/arch/ia64/linux-xen/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-subdir-y += sn
-
-obj-y += cmdline.o
-obj-y += efi.o
-obj-y += entry.o
-obj-y += irq_ia64.o
-obj-y += mca.o
-obj-y += mca_asm.o
-obj-y += mm_contig.o
-obj-y += process-linux-xen.o
-obj-y += sal.o
-obj-y += setup.o
-obj-y += smpboot.o
-obj-y += smp.o
-obj-y += time.o
-obj-y += tlb.o
-obj-y += unaligned.o
-obj-y += unwind.o
-obj-y += iosapic.o
-obj-y += numa.o
-obj-y += perfmon.o
-obj-y += perfmon_default_smpl.o
-obj-y += acpi.o
-obj-y += acpi_numa.o
diff --git a/xen/arch/ia64/linux-xen/README.origin b/xen/arch/ia64/linux-xen/README.origin
deleted file mode 100644
index 98aa152852..0000000000
--- a/xen/arch/ia64/linux-xen/README.origin
+++ /dev/null
@@ -1,47 +0,0 @@
-# Source files in this directory are near-identical copies of linux-2.6.13
-# files:
-
-# NOTE: ALL changes to these files should be clearly marked
-# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
-# easily updated to future versions of the corresponding Linux files.
-
-cmdline.c -> linux/lib/cmdline.c
-entry.h -> linux/arch/ia64/kernel/entry.h
-entry.S -> linux/arch/ia64/kernel/entry.S
-head.S -> linux/arch/ia64/kernel/head.S
-hpsim_ssc.h -> linux/arch/ia64/hp/sim/hpsim_ssc.h
-irq_ia64.c -> linux/arch/ia64/kernel/irq_ia64.c
-mca.c -> linux/arch/ia64/kernel/mca.c
-mca_asm.S -> linux/arch/ia64/kernel/mca_asm.S
-minstate.h -> linux/arch/ia64/kernel/minstate.h
-mm_contig.c -> linux/arch/ia64/mm/contig.c
-numa.c -> linux/arch/ia64/kernel/numa.c
-process-linux-xen.c -> linux/arch/ia64/kernel/process.c
-sal.c -> linux/arch/ia64/kernel/sal.c
-setup.c -> linux/arch/ia64/kernel/setup.c
-smp.c -> linux/arch/ia64/kernel/smp.c
-smpboot.c -> linux/arch/ia64/kernel/smpboot.c
-time.c -> linux/arch/ia64/kernel/time.c
-tlb.c -> linux/arch/ia64/mm/tlb.c
-unaligned.c -> linux/arch/ia64/kernel/unaligned.c
-unwind.c -> linux/arch/ia64/kernel/unwind.c
-unwind_decoder.c -> linux/arch/ia64/kernel/unwind_decoder.c
-unwind_i.h -> linux/arch/ia64/kernel/unwind_i.h
-
-# The files below are from Linux-2.6.16
-iosapic.c -> linux/arch/ia64/kernel/iosapic.c
-
-# The files below are from Linux-2.6.16.33
-perfmon.c -> linux/arch/kernel/perfmon.c
-perfmon_default_smpl.c -> linux/arch/kernel/perfmon_default_smpl.c
-perfmon_generic.h -> linux/arch/kernel/perfmon_generic.h
-perfmon_itanium.h -> linux/arch/kernel/perfmon_itanium.h
-perfmon_mckinley.h -> linux/arch/kernel/perfmon_mckinley.h
-perfmon_montecito.h -> linux/arch/kernel/perfmon_montecito.h
-
-# The files below are from Linux-2.6.21
-efi.c -> linux/arch/ia64/kernel/efi.c
-
-# The files below are from Linux-2.6.26-rc5
-acpi.c -> linux/arch/ia64/kernel/acpi.c
-acpi_numa.c -> linux/drivers/acpi/numa.c
\ No newline at end of file
diff --git a/xen/arch/ia64/linux-xen/acpi.c b/xen/arch/ia64/linux-xen/acpi.c
deleted file mode 100644
index 23a08849ef..0000000000
--- a/xen/arch/ia64/linux-xen/acpi.c
+++ /dev/null
@@ -1,1098 +0,0 @@
-/*
- * acpi.c - Architecture-Specific Low-Level ACPI Support
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2000 Intel Corp.
- * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
- * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
- * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
- * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
- * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/irq.h>
-#include <linux/acpi.h>
-#include <linux/efi.h>
-#include <linux/mmzone.h>
-#include <linux/nodemask.h>
-#include <asm/io.h>
-#include <asm/iosapic.h>
-#include <asm/machvec.h>
-#include <asm/page.h>
-#include <asm/system.h>
-#include <asm/numa.h>
-#include <asm/sal.h>
-#include <asm/cyclone.h>
-#include <asm/xen/hypervisor.h>
-#ifdef XEN
-#include <asm/hw_irq.h>
-#include <asm/numa.h>
-extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
-#endif
-
-
-#define BAD_MADT_ENTRY(entry, end) ( \
- (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
- ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-
-#define PREFIX "ACPI: "
-
-void (*pm_idle) (void) __read_mostly;
-EXPORT_SYMBOL(pm_idle);
-void (*pm_power_off) (void) __read_mostly;
-EXPORT_SYMBOL(pm_power_off);
-
-unsigned int acpi_cpei_override;
-unsigned int acpi_cpei_phys_cpuid;
-
-unsigned long acpi_wakeup_address = 0;
-
-#ifdef CONFIG_IA64_GENERIC
-static unsigned long __init acpi_find_rsdp(void)
-{
- unsigned long rsdp_phys = 0;
-
- if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
- rsdp_phys = efi.acpi20;
- else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
- printk(KERN_WARNING PREFIX
- "v1.0/r0.71 tables no longer supported\n");
- return rsdp_phys;
-}
-#endif
-
-const char __init *
-acpi_get_sysname(void)
-{
-#ifdef CONFIG_IA64_GENERIC
- unsigned long rsdp_phys;
- struct acpi_table_rsdp *rsdp;
- struct acpi_table_xsdt *xsdt;
- struct acpi_table_header *hdr;
-
- rsdp_phys = acpi_find_rsdp();
- if (!rsdp_phys) {
- printk(KERN_ERR
- "ACPI 2.0 RSDP not found, default to \"dig\"\n");
- return "dig";
- }
-
- rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
- if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
- printk(KERN_ERR
- "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
- return "dig";
- }
-
- xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
- hdr = &xsdt->header;
- if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
- printk(KERN_ERR
- "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
- return "dig";
- }
-
- if (!strcmp(hdr->oem_id, "HP")) {
- return "hpzx1";
- } else if (!strcmp(hdr->oem_id, "SGI")) {
- if (!strcmp(hdr->oem_table_id + 4, "UV"))
- return "uv";
- else
- return "sn2";
-#ifndef XEN
- } else if (is_running_on_xen() && !strcmp(hdr->oem_id, "XEN")) {
- return "xen";
-#endif
- }
-
- return "dig";
-#else
-# if defined (CONFIG_IA64_HP_SIM)
- return "hpsim";
-# elif defined (CONFIG_IA64_HP_ZX1)
- return "hpzx1";
-# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
- return "hpzx1_swiotlb";
-# elif defined (CONFIG_IA64_SGI_SN2)
- return "sn2";
-# elif defined (CONFIG_IA64_SGI_UV)
- return "uv";
-# elif defined (CONFIG_IA64_DIG)
- return "dig";
-# elif defined (CONFIG_IA64_XEN)
- return "xen";
-# else
-# error Unknown platform. Fix acpi.c.
-# endif
-#endif
-}
-
-#ifdef CONFIG_ACPI
-
-#define ACPI_MAX_PLATFORM_INTERRUPTS 256
-
-/* Array to record platform interrupt vectors for generic interrupt routing. */
-int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
- [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
-};
-
-/*
- * Interrupt routing API for device drivers. Provides interrupt vector for
- * a generic platform event. Currently only CPEI is implemented.
- */
-int acpi_request_vector(u32 int_type)
-{
- int vector = -1;
-
- if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
- /* corrected platform error interrupt */
- vector = platform_intr_list[int_type];
- } else
- printk(KERN_ERR
- "acpi_request_vector(): invalid interrupt type\n");
- return vector;
-}
-
-char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
-{
- return __va(phys_addr);
-}
-
-/* --------------------------------------------------------------------------
- Boot-time Table Parsing
- -------------------------------------------------------------------------- */
-
-static int total_cpus __initdata;
-static int available_cpus __initdata;
-struct acpi_table_madt *acpi_madt __initdata;
-static u8 has_8259;
-
-static int __init
-acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_madt_local_apic_override *lapic;
-
- lapic = (struct acpi_madt_local_apic_override *)header;
-
- if (BAD_MADT_ENTRY(lapic, end))
- return -EINVAL;
-
- if (lapic->address) {
- iounmap(ipi_base_addr);
- ipi_base_addr = ioremap(lapic->address, 0);
- }
- return 0;
-}
-
-#ifdef XEN
-
-#define MAX_LOCAL_SAPIC 255
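-/* indexed by ACPI processor id; 0xffff means no SAPIC id assigned yet */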
-static u16 ia64_acpiid_to_sapicid[ MAX_LOCAL_SAPIC ] =
- {[0 ... MAX_LOCAL_SAPIC - 1] = 0xffff };
-
-/* acpi id to cpu id */
-int get_cpu_id(u32 acpi_id)
-{
- int i;
- u16 apic_id;
-
- if ( acpi_id >= MAX_LOCAL_SAPIC )
- return -EINVAL;
-
- apic_id = ia64_acpiid_to_sapicid[acpi_id];
- if ( apic_id == 0xffff )
- return -EINVAL;
-
- for ( i = 0; i < NR_CPUS; i++ )
- {
- if ( apic_id == ia64_cpu_to_sapicid[i] )
- return i;
- }
-
- return -1;
-}
-
-int arch_acpi_set_pdc_bits(u32 acpi_id, u32 *pdc, u32 mask)
-{
- pdc[2] |= ACPI_PDC_EST_CAPABILITY_SMP & mask;
- return 0;
-}
-
-#endif
-
-static int __init
-acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
-{
- struct acpi_madt_local_sapic *lsapic;
-
- lsapic = (struct acpi_madt_local_sapic *)header;
-
-	/* Skip BAD_MADT_ENTRY check, as lsapic size could vary */
-
- if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
-#ifdef CONFIG_SMP
- smp_boot_data.cpu_phys_id[available_cpus] =
- (lsapic->id << 8) | lsapic->eid;
-#endif
-#ifdef XEN
- ia64_acpiid_to_sapicid[lsapic->processor_id] =
- (lsapic->id << 8) | lsapic->eid;
-#endif
- ++available_cpus;
- }
-
- total_cpus++;
- return 0;
-}
-
-static int __init
-acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
-{
- struct acpi_madt_local_apic_nmi *lacpi_nmi;
-
- lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
-
- if (BAD_MADT_ENTRY(lacpi_nmi, end))
- return -EINVAL;
-
- /* TBD: Support lapic_nmi entries */
- return 0;
-}
-
-static int __init
-acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
-{
- struct acpi_madt_io_sapic *iosapic;
-
- iosapic = (struct acpi_madt_io_sapic *)header;
-
- if (BAD_MADT_ENTRY(iosapic, end))
- return -EINVAL;
-
-#ifndef XEN
- return iosapic_init(iosapic->address, iosapic->global_irq_base);
-#else
- return iosapic_init(iosapic->address, iosapic->global_irq_base,
- iosapic->id);
-#endif
-}
-
-static unsigned int __initdata acpi_madt_rev;
-
-static int __init
-acpi_parse_plat_int_src(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_madt_interrupt_source *plintsrc;
- int vector;
-
- plintsrc = (struct acpi_madt_interrupt_source *)header;
-
- if (BAD_MADT_ENTRY(plintsrc, end))
- return -EINVAL;
-
- /*
- * Get vector assignment for this interrupt, set attributes,
- * and program the IOSAPIC routing table.
- */
- vector = iosapic_register_platform_intr(plintsrc->type,
- plintsrc->global_irq,
- plintsrc->io_sapic_vector,
- plintsrc->eid,
- plintsrc->id,
- ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
- ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
- IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
- ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
- ACPI_MADT_TRIGGER_EDGE) ?
- IOSAPIC_EDGE : IOSAPIC_LEVEL);
-
- platform_intr_list[plintsrc->type] = vector;
- if (acpi_madt_rev > 1) {
- acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
- }
-
- /*
-	 * Save the physical id, so we can check when it's being removed
- */
- acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;
-
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-#ifdef XEN
-unsigned int force_cpei_retarget = 0;
-#endif
-
-unsigned int can_cpei_retarget(void)
-{
- extern int cpe_vector;
- extern unsigned int force_cpei_retarget;
-
-	/*
-	 * If CPEI is supported, it can be re-targeted only when the
-	 * override flag is present (or retargeting is forced).
-	 * Without a CPEI vector we are in polling mode, which is
-	 * always re-targetable.
-	 */
- if (cpe_vector > 0) {
- if (acpi_cpei_override || force_cpei_retarget)
- return 1;
- else
- return 0;
- }
- return 1;
-}
-
-unsigned int is_cpu_cpei_target(unsigned int cpu)
-{
- unsigned int logical_id;
-
- logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);
-
- if (logical_id == cpu)
- return 1;
- else
- return 0;
-}
-
-void set_cpei_target_cpu(unsigned int cpu)
-{
- acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
-}
-#endif
-
-unsigned int get_cpei_target_cpu(void)
-{
- return acpi_cpei_phys_cpuid;
-}
-
-static int __init
-acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_madt_interrupt_override *p;
-
- p = (struct acpi_madt_interrupt_override *)header;
-
- if (BAD_MADT_ENTRY(p, end))
- return -EINVAL;
-
- iosapic_override_isa_irq(p->source_irq, p->global_irq,
- ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
- ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
- IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
- ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
- ACPI_MADT_TRIGGER_EDGE) ?
- IOSAPIC_EDGE : IOSAPIC_LEVEL);
- return 0;
-}
-
-static int __init
-acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
-{
- struct acpi_madt_nmi_source *nmi_src;
-
- nmi_src = (struct acpi_madt_nmi_source *)header;
-
- if (BAD_MADT_ENTRY(nmi_src, end))
- return -EINVAL;
-
-	/* TBD: Support nmi_src entries */
- return 0;
-}
-
-static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) {
-
- /*
- * Unfortunately ITC_DRIFT is not yet part of the
- * official SAL spec, so the ITC_DRIFT bit is not
- * set by the BIOS on this hardware.
- */
- sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
-
-#ifndef XEN
- cyclone_setup();
-#endif
- }
-}
-
-static int __init acpi_parse_madt(struct acpi_table_header *table)
-{
- if (!table)
- return -EINVAL;
-
- acpi_madt = (struct acpi_table_madt *)table;
-
- acpi_madt_rev = acpi_madt->header.revision;
-
- /* remember the value for reference after free_initmem() */
-#ifdef CONFIG_ITANIUM
- has_8259 = 1; /* Firmware on old Itanium systems is broken */
-#else
- has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
-#endif
- iosapic_system_init(has_8259);
-
- /* Get base address of IPI Message Block */
-
- if (acpi_madt->address)
- ipi_base_addr = ioremap(acpi_madt->address, 0);
-
- printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
-
- acpi_madt_oem_check(acpi_madt->header.oem_id,
- acpi_madt->header.oem_table_id);
-
- return 0;
-}
-
-#ifdef CONFIG_ACPI_NUMA
-
-#undef SLIT_DEBUG
-
-#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
-
-static int __initdata srat_num_cpus; /* number of cpus */
-static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
-#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
-#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
-static struct acpi_table_slit __initdata *slit_table;
-cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
-
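-/*
- * SRAT revision 1 defines only an 8-bit proximity domain; revision 2
- * widens it to 32 bits using the three "hi" bytes. sn2 firmware uses
- * 16-bit proximity domains even on older revisions.
- */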
-static int __init
-get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
-{
- int pxm;
-
- pxm = pa->proximity_domain_lo;
- if (srat_rev >= 2) {
- pxm += pa->proximity_domain_hi[0] << 8;
- pxm += pa->proximity_domain_hi[1] << 16;
- pxm += pa->proximity_domain_hi[2] << 24;
- } else if (ia64_platform_is("sn2"))
- pxm += pa->proximity_domain_hi[0] << 8;
- return pxm;
-}
-
-static int __init
-get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
-{
- int pxm;
-
- pxm = ma->proximity_domain;
- if (!ia64_platform_is("sn2") && srat_rev < 2)
- pxm &= 0xff;
-
- return pxm;
-}
-
-/*
- * ACPI 2.0 SLIT (System Locality Information Table)
- * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
- */
-void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
-{
- u32 len;
-
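-	/* header, the 8-byte locality_count, then an NxN byte matrix */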
- len = sizeof(struct acpi_table_header) + 8
- + slit->locality_count * slit->locality_count;
- if (slit->header.length != len) {
- printk(KERN_ERR
- "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
- len, slit->header.length);
- memset(numa_slit, 10, sizeof(numa_slit));
- return;
- }
- slit_table = slit;
-}
-
-void __init
-acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
-{
- int pxm;
-
- if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
- return;
-
- pxm = get_processor_proximity_domain(pa);
-
- /* record this node in proximity bitmap */
- pxm_bit_set(pxm);
-
- node_cpuid[srat_num_cpus].phys_id =
- (pa->apic_id << 8) | (pa->local_sapic_eid);
-	/* nid will be overridden with the logical node id later */
- node_cpuid[srat_num_cpus].nid = pxm;
- cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
- srat_num_cpus++;
-}
-
-void __init
-acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
-{
- unsigned long paddr, size;
- int pxm;
- struct node_memblk_s *p, *q, *pend;
-
- pxm = get_memory_proximity_domain(ma);
-
- /* fill node memory chunk structure */
- paddr = ma->base_address;
- size = ma->length;
-
- /* Ignore disabled entries */
- if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
- return;
-
- /* record this node in proximity bitmap */
- pxm_bit_set(pxm);
-
- /* Insertion sort based on base address */
- pend = &node_memblk[num_node_memblks];
- for (p = &node_memblk[0]; p < pend; p++) {
- if (paddr < p->start_paddr)
- break;
- }
- if (p < pend) {
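-		/* shift the remaining entries up one slot to make room */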
- for (q = pend - 1; q >= p; q--)
- *(q + 1) = *q;
- }
- p->start_paddr = paddr;
- p->size = size;
- p->nid = pxm;
- num_node_memblks++;
-}
-
-void __init acpi_numa_arch_fixup(void)
-{
- int i, j, node_from, node_to;
-
- /* If there's no SRAT, fix the phys_id and mark node 0 online */
- if (srat_num_cpus == 0) {
- node_set_online(0);
- node_cpuid[0].phys_id = hard_smp_processor_id();
- return;
- }
-
- /*
- * MCD - This can probably be dropped now. No need for pxm ID to node ID
- * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
- */
- nodes_clear(node_online_map);
- for (i = 0; i < MAX_PXM_DOMAINS; i++) {
- if (pxm_bit_test(i)) {
- int nid = acpi_map_pxm_to_node(i);
- node_set_online(nid);
- }
- }
-
- /* set logical node id in memory chunk structure */
- for (i = 0; i < num_node_memblks; i++)
- node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
-
- /* assign memory bank numbers for each chunk on each node */
- for_each_online_node(i) {
- int bank;
-
- bank = 0;
- for (j = 0; j < num_node_memblks; j++)
- if (node_memblk[j].nid == i)
- node_memblk[j].bank = bank++;
- }
-
- /* set logical node id in cpu structure */
- for_each_possible_early_cpu(i)
- node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
-
- printk(KERN_INFO "Number of logical nodes in system = %d\n",
- num_online_nodes());
- printk(KERN_INFO "Number of memory chunks in system = %d\n",
- num_node_memblks);
-
- if (!slit_table)
- return;
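-
-	/* default every distance to 0xff, then fill in the present pxm pairs */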
- memset(numa_slit, -1, sizeof(numa_slit));
- for (i = 0; i < slit_table->locality_count; i++) {
- if (!pxm_bit_test(i))
- continue;
- node_from = pxm_to_node(i);
- for (j = 0; j < slit_table->locality_count; j++) {
- if (!pxm_bit_test(j))
- continue;
- node_to = pxm_to_node(j);
- node_distance(node_from, node_to) =
- slit_table->entry[i * slit_table->locality_count + j];
- }
- }
-
-#ifdef SLIT_DEBUG
- printk("ACPI 2.0 SLIT locality table:\n");
- for_each_online_node(i) {
- for_each_online_node(j)
- printk("%03d ", node_distance(i, j));
- printk("\n");
- }
-#endif
-}
-#endif /* CONFIG_ACPI_NUMA */
-
-#ifndef XEN
-/*
- * success: return IRQ number (>=0)
- * failure: return < 0
- */
-int acpi_register_gsi(u32 gsi, int triggering, int polarity)
-{
- if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
- return gsi;
-
- if (has_8259 && gsi < 16)
- return isa_irq_to_vector(gsi);
-
- return iosapic_register_intr(gsi,
- (polarity ==
- ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
- IOSAPIC_POL_LOW,
- (triggering ==
- ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
- IOSAPIC_LEVEL);
-}
-
-void acpi_unregister_gsi(u32 gsi)
-{
- if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
- return;
-
- if (has_8259 && gsi < 16)
- return;
-
- iosapic_unregister_intr(gsi);
-}
-#endif
-
-static int __init acpi_parse_fadt(struct acpi_table_header *table)
-{
- struct acpi_table_header *fadt_header;
- struct acpi_table_fadt *fadt;
-
- if (!table)
- return -EINVAL;
-
- fadt_header = (struct acpi_table_header *)table;
- if (fadt_header->revision != 3)
- return -ENODEV; /* Only deal with ACPI 2.0 FADT */
-
- fadt = (struct acpi_table_fadt *)fadt_header;
-
-#ifndef XEN
- acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
-#endif
- return 0;
-}
-
-int __init acpi_boot_init(void)
-{
-
- /*
- * MADT
- * ----
-	 * Parse the Multiple APIC Description Table (MADT), if it exists.
- * Note that this table provides platform SMP configuration
- * information -- the successor to MPS tables.
- */
-
- if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
- printk(KERN_ERR PREFIX "Can't find MADT\n");
- goto skip_madt;
- }
-
- /* Local APIC */
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC address override entry\n");
-
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
- < 1)
- printk(KERN_ERR PREFIX
- "Error parsing MADT - no LAPIC entries\n");
-
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
- < 0)
- printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-
- /* I/O APIC */
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) {
- if (!ia64_platform_is("sn2"))
- printk(KERN_ERR PREFIX
- "Error parsing MADT - no IOSAPIC entries\n");
- }
-
- /* System-Level Interrupt Routing */
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
- ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
- printk(KERN_ERR PREFIX
- "Error parsing platform interrupt source entry\n");
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
- printk(KERN_ERR PREFIX
- "Error parsing interrupt source overrides entry\n");
-
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
- printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
- skip_madt:
-
- /*
- * FADT says whether a legacy keyboard controller is present.
- * The FADT also contains an SCI_INT line, by which the system
- * gets interrupts such as power and sleep buttons. If it's not
-	 * on a legacy interrupt, it needs to be set up.
- */
- if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
- printk(KERN_ERR PREFIX "Can't find FADT\n");
-
-#ifdef XEN
- acpi_dmar_init();
-#endif
-
-#ifdef CONFIG_SMP
- if (available_cpus == 0) {
- printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
- printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
- smp_boot_data.cpu_phys_id[available_cpus] =
- hard_smp_processor_id();
- available_cpus = 1; /* We've got at least one of these, no? */
- }
- smp_boot_data.cpu_count = available_cpus;
-
- smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
- if (srat_num_cpus == 0) {
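-		/*
-		 * Slot 0 is reserved for the boot CPU (see
-		 * acpi_numa_arch_fixup()), so fill the remaining slots
-		 * with the other CPUs' physical ids.
-		 */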
- int cpu, i = 1;
- for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
- if (smp_boot_data.cpu_phys_id[cpu] !=
- hard_smp_processor_id())
- node_cpuid[i++].phys_id =
- smp_boot_data.cpu_phys_id[cpu];
- }
-# endif
-#endif
-#ifdef CONFIG_ACPI_NUMA
- build_cpu_to_node_map();
-#endif
- /* Make boot-up look pretty */
- printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
- total_cpus);
- return 0;
-}
-
-int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
-{
- int tmp;
-
- if (has_8259 && gsi < 16)
- *irq = isa_irq_to_vector(gsi);
- else {
- tmp = gsi_to_irq(gsi);
- if (tmp == -1)
- return -1;
- *irq = tmp;
- }
- return 0;
-}
-
-/*
- * ACPI based hotplug CPU support
- */
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static
-int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
-{
-#ifdef CONFIG_ACPI_NUMA
- int pxm_id;
- int nid;
-
- pxm_id = acpi_get_pxm(handle);
- /*
-	 * We don't support cpu-only-node hot-add. But if the system has
-	 * an SRAT table, the pxm is already known and the node is ready,
-	 * so pxm_to_nid(pxm) would be enough.
-	 * This code is for systems whose SRAT table doesn't cover all
-	 * possible cpus.
- */
- nid = acpi_map_pxm_to_node(pxm_id);
- node_cpuid[cpu].phys_id = physid;
- node_cpuid[cpu].nid = nid;
-#endif
- return (0);
-}
-
-int additional_cpus __initdata = -1;
-
-static __init int setup_additional_cpus(char *s)
-{
- if (s)
- additional_cpus = simple_strtol(s, NULL, 0);
-
- return 0;
-}
-
-early_param("additional_cpus", setup_additional_cpus);
-
-/*
- * cpu_possible_map should be static: it cannot change as CPUs are
- * onlined or offlined. The reason is that per-cpu data structures
- * are allocated by some modules at init time, and they don't expect
- * to do this dynamically on cpu arrival/departure.
- * cpu_present_map, on the other hand, can change dynamically.
- * When cpu_hotplug is not compiled in, we resort to the current
- * behaviour, which is cpu_possible == cpu_present.
- * - Ashok Raj
- *
- * Three ways to find out the number of additional hotplug CPUs:
- * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
- * - The user can overwrite it with additional_cpus=NUM
- * - Otherwise don't reserve additional CPUs.
- */
-__init void prefill_possible_map(void)
-{
- int i;
- int possible, disabled_cpus;
-
- disabled_cpus = total_cpus - available_cpus;
-
- if (additional_cpus == -1) {
- if (disabled_cpus > 0)
- additional_cpus = disabled_cpus;
- else
- additional_cpus = 0;
- }
-
- possible = available_cpus + additional_cpus;
-
- if (possible > NR_CPUS)
- possible = NR_CPUS;
-
- printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
- possible, max((possible - available_cpus), 0));
-
- for (i = 0; i < possible; i++)
- cpumask_set_cpu(i, &cpu_possible_map);
-}
-
-#ifndef XEN
-int acpi_map_lsapic(acpi_handle handle, int *pcpu)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- struct acpi_madt_local_sapic *lsapic;
- cpumask_t tmp_map;
- long physid;
- int cpu;
-
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
- return -EINVAL;
-
- if (!buffer.length || !buffer.pointer)
- return -EINVAL;
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER)
- {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
-
- if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
- (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- physid = ((lsapic->id << 8) | (lsapic->eid));
-
- kfree(buffer.pointer);
- buffer.length = ACPI_ALLOCATE_BUFFER;
- buffer.pointer = NULL;
-
- cpumask_complement(&tmp_map, &cpu_present_map);
- cpu = cpumask_first(&tmp_map);
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
-
- acpi_map_cpu2node(handle, cpu, physid);
-
- cpumask_set_cpu(cpu, &cpu_present_map);
- ia64_cpu_to_sapicid[cpu] = physid;
-
- *pcpu = cpu;
- return (0);
-}
-
-EXPORT_SYMBOL(acpi_map_lsapic);
-
-int acpi_unmap_lsapic(int cpu)
-{
- ia64_cpu_to_sapicid[cpu] = -1;
- cpumask_clear_cpu(cpu, &cpu_present_map);
-
-#ifdef CONFIG_ACPI_NUMA
-	/* NUMA-specific cleanups */
-#endif
-
- return (0);
-}
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
-#endif /* XEN */
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
-#ifndef XEN
-#ifdef CONFIG_ACPI_NUMA
-static acpi_status __devinit
-acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- struct acpi_madt_io_sapic *iosapic;
- unsigned int gsi_base;
- int pxm, node;
-
- /* Only care about objects w/ a method that returns the MADT */
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
- return AE_OK;
-
- if (!buffer.length || !buffer.pointer)
- return AE_OK;
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER ||
- obj->buffer.length < sizeof(*iosapic)) {
- kfree(buffer.pointer);
- return AE_OK;
- }
-
- iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
-
- if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
- kfree(buffer.pointer);
- return AE_OK;
- }
-
- gsi_base = iosapic->global_irq_base;
-
- kfree(buffer.pointer);
-
- /*
- * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
- * us which node to associate this with.
- */
- pxm = acpi_get_pxm(handle);
- if (pxm < 0)
- return AE_OK;
-
- node = pxm_to_node(pxm);
-
- if (node >= MAX_NUMNODES || !node_online(node) ||
- cpus_empty(node_to_cpumask(node)))
- return AE_OK;
-
- /* We know a gsi to node mapping! */
- map_iosapic_to_node(gsi_base, node);
- return AE_OK;
-}
-
-static int __init
-acpi_map_iosapics (void)
-{
- acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
- return 0;
-}
-
-fs_initcall(acpi_map_iosapics);
-#endif /* CONFIG_ACPI_NUMA */
-
-int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
-{
- int err;
-
- if ((err = iosapic_init(phys_addr, gsi_base)))
- return err;
-
-#ifdef CONFIG_ACPI_NUMA
- acpi_map_iosapic(handle, 0, NULL, NULL);
-#endif /* CONFIG_ACPI_NUMA */
-
- return 0;
-}
-
-EXPORT_SYMBOL(acpi_register_ioapic);
-
-int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
-{
- return iosapic_remove(gsi_base);
-}
-
-EXPORT_SYMBOL(acpi_unregister_ioapic);
-#endif /* XEN */
-
-/*
- * acpi_save_state_mem() - save kernel state
- *
- * TBD when IA64 starts to support suspend...
- */
-int acpi_save_state_mem(void) { return 0; }
-
-/*
- * acpi_restore_state()
- */
-void acpi_restore_state_mem(void) {}
-
-/*
- * do_suspend_lowlevel()
- */
-void do_suspend_lowlevel(void) {}
-
-#endif /* CONFIG_ACPI */
diff --git a/xen/arch/ia64/linux-xen/acpi_numa.c b/xen/arch/ia64/linux-xen/acpi_numa.c
deleted file mode 100644
index 802b9bea95..0000000000
--- a/xen/arch/ia64/linux-xen/acpi_numa.c
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * acpi_numa.c - ACPI NUMA support
- *
- * Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <acpi/acmacros.h>
-
-#ifndef XEN
-#define ACPI_NUMA 0x80000000
-#define _COMPONENT ACPI_NUMA
-ACPI_MODULE_NAME("numa");
-#else
-#define NID_INVAL -1
-#define PXM_INVAL -1
-#endif
-
-#ifndef XEN
-static nodemask_t nodes_found_map = NODE_MASK_NONE;
-#else
-/* The above causes an "initializer element is not constant" error;
- * anyway, NODE_MASK_NONE is a zero-filled array.
- */
-static nodemask_t nodes_found_map;
-#endif
-
-/* maps to convert between proximity domain and logical node ID */
-static int pxm_to_node_map[MAX_PXM_DOMAINS]
- = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
-static int node_to_pxm_map[MAX_NUMNODES]
- = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
-
-int pxm_to_node(int pxm)
-{
- if (pxm < 0)
- return NID_INVAL;
- return pxm_to_node_map[pxm];
-}
-
-int node_to_pxm(int node)
-{
- if (node < 0)
- return PXM_INVAL;
- return node_to_pxm_map[node];
-}
-
-void __acpi_map_pxm_to_node(int pxm, int node)
-{
- pxm_to_node_map[pxm] = node;
- node_to_pxm_map[node] = pxm;
-}
-
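-/*
- * Return the node already mapped to @pxm, or allocate the first unused
- * node id for it; fails with NID_INVAL once MAX_NUMNODES nodes are in
- * use.
- */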
-int acpi_map_pxm_to_node(int pxm)
-{
- int node = pxm_to_node_map[pxm];
-
- if (node < 0){
- if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
- return NID_INVAL;
- node = first_unset_node(nodes_found_map);
- __acpi_map_pxm_to_node(pxm, node);
- node_set(node, nodes_found_map);
- }
-
- return node;
-}
-
-#ifndef XEN
-#if 0
-void __cpuinit acpi_unmap_pxm_to_node(int node)
-{
- int pxm = node_to_pxm_map[node];
- pxm_to_node_map[pxm] = NID_INVAL;
- node_to_pxm_map[node] = PXM_INVAL;
- node_clear(node, nodes_found_map);
-}
-#endif /* 0 */
-
-static void __init
-acpi_table_print_srat_entry(struct acpi_subtable_header *header)
-{
-
- ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
-
- if (!header)
- return;
-
- switch (header->type) {
-
- case ACPI_SRAT_TYPE_CPU_AFFINITY:
-#ifdef ACPI_DEBUG_OUTPUT
- {
- struct acpi_srat_cpu_affinity *p =
- (struct acpi_srat_cpu_affinity *)header;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
- p->apic_id, p->local_sapic_eid,
- p->proximity_domain_lo,
- (p->flags & ACPI_SRAT_CPU_ENABLED)?
- "enabled" : "disabled"));
- }
-#endif /* ACPI_DEBUG_OUTPUT */
- break;
-
- case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
-#ifdef ACPI_DEBUG_OUTPUT
- {
- struct acpi_srat_mem_affinity *p =
- (struct acpi_srat_mem_affinity *)header;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Memory (0x%lx length 0x%lx type 0x%x) in proximity domain %d %s%s\n",
- (unsigned long)p->base_address,
- (unsigned long)p->length,
- p->memory_type, p->proximity_domain,
- (p->flags & ACPI_SRAT_MEM_ENABLED)?
- "enabled" : "disabled",
- (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
- " hot-pluggable" : ""));
- }
-#endif /* ACPI_DEBUG_OUTPUT */
- break;
-
- default:
- printk(KERN_WARNING PREFIX
- "Found unsupported SRAT entry (type = 0x%x)\n",
- header->type);
- break;
- }
-}
-
-static int __init acpi_parse_slit(struct acpi_table_header *table)
-{
- struct acpi_table_slit *slit;
- u32 localities;
-
- if (!table)
- return -EINVAL;
-
- slit = (struct acpi_table_slit *)table;
-
- /* downcast just for %llu vs %lu for i386/ia64 */
- localities = (u32) slit->locality_count;
-
- acpi_numa_slit_init(slit);
-
- return 0;
-}
-
-static int __init
-acpi_parse_processor_affinity(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_srat_cpu_affinity *processor_affinity;
-
- processor_affinity = (struct acpi_srat_cpu_affinity *)header;
- if (!processor_affinity)
- return -EINVAL;
-
- acpi_table_print_srat_entry(header);
-
-	/* let the architecture-dependent part do it */
- acpi_numa_processor_affinity_init(processor_affinity);
-
- return 0;
-}
-
-static int __init
-acpi_parse_memory_affinity(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_srat_mem_affinity *memory_affinity;
-
- memory_affinity = (struct acpi_srat_mem_affinity *)header;
- if (!memory_affinity)
- return -EINVAL;
-
- acpi_table_print_srat_entry(header);
-
-	/* let the architecture-dependent part do it */
- acpi_numa_memory_affinity_init(memory_affinity);
-
- return 0;
-}
-
-static int __init acpi_parse_srat(struct acpi_table_header *table)
-{
- struct acpi_table_srat *srat;
-
- if (!table)
- return -EINVAL;
-
- srat = (struct acpi_table_srat *)table;
-
- return 0;
-}
-
-static int __init
-acpi_table_parse_srat(enum acpi_srat_type id,
- acpi_table_entry_handler handler, unsigned int max_entries)
-{
- return acpi_table_parse_entries(ACPI_SIG_SRAT,
- sizeof(struct acpi_table_srat), id,
- handler, max_entries);
-}
-
-int __init acpi_numa_init(void)
-{
- /* SRAT: Static Resource Affinity Table */
- if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, NR_CPUS);
- acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
- }
-
- /* SLIT: System Locality Information Table */
- acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
-
- acpi_numa_arch_fixup();
- return 0;
-}
-
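-/*
- * Evaluate _PXM for the given handle, walking up the namespace until
- * an ancestor provides one; returns -1 if no _PXM is found.
- */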
-int acpi_get_pxm(acpi_handle h)
-{
- unsigned long pxm;
- acpi_status status;
- acpi_handle handle;
- acpi_handle phandle = h;
-
- do {
- handle = phandle;
- status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
- if (ACPI_SUCCESS(status))
- return pxm;
- status = acpi_get_parent(handle, &phandle);
- } while (ACPI_SUCCESS(status));
- return -1;
-}
-
-int acpi_get_node(acpi_handle *handle)
-{
- int pxm, node = -1;
-
- pxm = acpi_get_pxm(handle);
- if (pxm >= 0)
- node = acpi_map_pxm_to_node(pxm);
-
- return node;
-}
-EXPORT_SYMBOL(acpi_get_node);
-#endif /* XEN */
diff --git a/xen/arch/ia64/linux-xen/cmdline.c b/xen/arch/ia64/linux-xen/cmdline.c
deleted file mode 100644
index 48e6de1ccf..0000000000
--- a/xen/arch/ia64/linux-xen/cmdline.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * linux/lib/cmdline.c
- * Helper functions generally used for parsing kernel command line
- * and module options.
- *
- * Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
- *
- * GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#ifdef XEN
-#include <xen/lib.h>
-#endif
-
-
-/**
- * get_option - Parse integer from an option string
- * @str: option string
- * @pint: (output) integer value parsed from @str
- *
- * Read an int from an option string; if available, accept a subsequent
- * comma as well.
- *
- * Return values:
- * 0 : no int in string
- * 1 : int found, no subsequent comma
- * 2 : int found including a subsequent comma
- */
-
-int get_option (char **str, int *pint)
-{
- char *cur = *str;
-
- if (!cur || !(*cur))
- return 0;
-#ifndef XEN
- *pint = simple_strtol (cur, str, 0);
-#else
- *pint = simple_strtol (cur, (const char**)str, 0);
-#endif
- if (cur == *str)
- return 0;
- if (**str == ',') {
- (*str)++;
- return 2;
- }
-
- return 1;
-}
-
-/**
- * get_options - Parse a string into a list of integers
- * @str: String to be parsed
- * @nints: size of integer array
- * @ints: integer array
- *
- * This function parses a string containing a comma-separated
- * list of integers. The parse halts when the array is
- * full, or when no more numbers can be retrieved from the
- * string.
- *
- * The return value points at the character in the string which caused
- * the parse to end (typically a null terminator, if @str is
- * completely parseable).
- */
-
-char *get_options(const char *str, int nints, int *ints)
-{
- int res, i = 1;
-
- while (i < nints) {
- res = get_option ((char **)&str, ints + i);
- if (res == 0)
- break;
- i++;
- if (res == 1)
- break;
- }
- ints[0] = i - 1;
- return (char *)str;
-}
-
-/**
- * memparse - parse a string with mem suffixes into a number
- * @ptr: Where parse begins
- * @retptr: (output) Pointer to next char after parse completes
- *
- * Parses a string into a number. The number stored at @ptr is
- * potentially suffixed with %K (for kilobytes, or 1024 bytes),
- * %M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
- * 1073741824). If the number is suffixed with K, M, or G, then
- * the return value is the number multiplied by one kilobyte, one
- * megabyte, or one gigabyte, respectively.
- */
-
-unsigned long long memparse (char *ptr, char **retptr)
-{
-#ifndef XEN
- unsigned long long ret = simple_strtoull (ptr, retptr, 0);
-#else
- unsigned long long ret = simple_strtoull (ptr, (const char**)retptr, 0);
-#endif
-
- switch (**retptr) {
- case 'G':
- case 'g':
- ret <<= 10;
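-		/* fall through */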
- case 'M':
- case 'm':
- ret <<= 10;
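-		/* fall through */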
- case 'K':
- case 'k':
- ret <<= 10;
- (*retptr)++;
- default:
- break;
- }
- return ret;
-}
-
-
-EXPORT_SYMBOL(memparse);
-EXPORT_SYMBOL(get_option);
-EXPORT_SYMBOL(get_options);
diff --git a/xen/arch/ia64/linux-xen/efi.c b/xen/arch/ia64/linux-xen/efi.c
deleted file mode 100644
index 3f0717489c..0000000000
--- a/xen/arch/ia64/linux-xen/efi.c
+++ /dev/null
@@ -1,1334 +0,0 @@
-/*
- * Extensible Firmware Interface
- *
- * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999-2003 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * All EFI Runtime Services are not implemented yet as EFI only
- * supports physical mode addressing on SoftSDV. This is to be fixed
- * in a future version. --drummond 1999-07-20
- *
- * Implemented EFI runtime services and virtual mode calls. --davidm
- *
- * Goutham Rao: <goutham.rao@intel.com>
- * Skip non-WB memory and ignore empty memory ranges.
- */
-#include <linux/module.h>
-#include <linux/bootmem.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/efi.h>
-#include <linux/kexec.h>
-
-#include <asm/io.h>
-#include <asm/kregs.h>
-#include <asm/meminit.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/mca.h>
-
-#define EFI_DEBUG 0
-
-extern efi_status_t efi_call_phys (void *, ...);
-#ifdef XEN
-/* this should be defined in linux/kernel.h */
-extern unsigned long long memparse (char *ptr, char **retptr);
-/* this should be defined in linux/efi.h */
-//#define EFI_INVALID_TABLE_ADDR (void *)(~0UL)
-#endif
-
-struct efi efi;
-EXPORT_SYMBOL(efi);
-static efi_runtime_services_t *runtime;
-#if defined(XEN) && !defined(CONFIG_VIRTUAL_FRAME_TABLE)
-// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
-static unsigned long mem_limit = ~0UL, max_addr = 0x100000000UL, min_addr = 0UL;
-#else
-static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
-#endif
-
-#define efi_call_virt(f, args...) (*(f))(args)
-
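-/*
- * Each EFI runtime service gets two stubs: a "phys" flavour that
- * translates pointer arguments to physical addresses via ia64_tpa()
- * (used until SetVirtualAddressMap succeeds) and a "virt" flavour
- * that passes them through unchanged. Both save the scratch fp
- * registers around the firmware call, and under Xen switch rr6/rr7
- * to the EFI mappings for its duration.
- */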
-#define STUB_GET_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_time_cap_t *atc = NULL; \
- efi_status_t ret; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
- if (tc) \
- atc = adjust_arg(tc); \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_SET_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_time (efi_time_t *tm) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
- adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_time_t *atm = NULL; \
- efi_status_t ret; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
- if (tm) \
- atm = adjust_arg(tm); \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
- enabled, atm); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_GET_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
- unsigned long *data_size, void *data) \
-{ \
- struct ia64_fpreg fr[6]; \
- u32 *aattr = NULL; \
- efi_status_t ret; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
- if (attr) \
- aattr = adjust_arg(attr); \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
- adjust_arg(name), adjust_arg(vendor), aattr, \
- adjust_arg(data_size), adjust_arg(data)); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
- adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_SET_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
- unsigned long data_size, void *data) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
- adjust_arg(name), adjust_arg(vendor), attr, data_size, \
- adjust_arg(data)); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_next_high_mono_count (u32 *count) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
- __va(runtime->get_next_high_mono_count), adjust_arg(count)); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
-}
-
-#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
-static void \
-prefix##_reset_system (int reset_type, efi_status_t status, \
- unsigned long data_size, efi_char16_t *data) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_char16_t *adata = NULL; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
- if (data) \
- adata = adjust_arg(data); \
- \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
- reset_type, status, data_size, adata); \
- /* should not return, but just in case... */ \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
-}
-
-#define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg))
-
-STUB_GET_TIME(phys, phys_ptr)
-STUB_SET_TIME(phys, phys_ptr)
-STUB_GET_WAKEUP_TIME(phys, phys_ptr)
-STUB_SET_WAKEUP_TIME(phys, phys_ptr)
-STUB_GET_VARIABLE(phys, phys_ptr)
-STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
-STUB_SET_VARIABLE(phys, phys_ptr)
-STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
-STUB_RESET_SYSTEM(phys, phys_ptr)
-
-#define id(arg) arg
-
-STUB_GET_TIME(virt, id)
-STUB_SET_TIME(virt, id)
-STUB_GET_WAKEUP_TIME(virt, id)
-STUB_SET_WAKEUP_TIME(virt, id)
-STUB_GET_VARIABLE(virt, id)
-STUB_GET_NEXT_VARIABLE(virt, id)
-STUB_SET_VARIABLE(virt, id)
-STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
-STUB_RESET_SYSTEM(virt, id)
-
-#ifndef XEN
-void
-efi_gettimeofday (struct timespec *ts)
-{
- efi_time_t tm;
-
-	memset(ts, 0, sizeof(*ts));	/* zero the struct, not the pointer */
- if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
- return;
-
- ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
- ts->tv_nsec = tm.nanosecond;
-}
-#endif
-
-static int
-is_memory_available (efi_memory_desc_t *md)
-{
- if (!(md->attribute & EFI_MEMORY_WB))
- return 0;
-
- switch (md->type) {
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_BOOT_SERVICES_CODE:
- case EFI_BOOT_SERVICES_DATA:
- case EFI_CONVENTIONAL_MEMORY:
- return 1;
- }
- return 0;
-}
-
-typedef struct kern_memdesc {
- u64 attribute;
- u64 start;
- u64 num_pages;
-} kern_memdesc_t;
-
-static kern_memdesc_t *kern_memmap;
-
-#define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
-
-static inline u64
-kmd_end(kern_memdesc_t *kmd)
-{
- return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
-}
-
-static inline u64
-efi_md_end(efi_memory_desc_t *md)
-{
- return (md->phys_addr + efi_md_size(md));
-}
-
-static inline int
-efi_wb(efi_memory_desc_t *md)
-{
- return (md->attribute & EFI_MEMORY_WB);
-}
-
-static inline int
-efi_uc(efi_memory_desc_t *md)
-{
- return (md->attribute & EFI_MEMORY_UC);
-}
-
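-/*
- * Invoke CALLBACK for each kern_memmap region with the requested
- * attribute, translating physical ranges to virtual ones: WB memory
- * lives at PAGE_OFFSET, UC memory at __IA64_UNCACHED_OFFSET.
- */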
-static void
-walk (efi_freemem_callback_t callback, void *arg, u64 attr)
-{
- kern_memdesc_t *k;
- u64 start, end, voff;
-
- voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
- for (k = kern_memmap; k->start != ~0UL; k++) {
- if (k->attribute != attr)
- continue;
- start = PAGE_ALIGN(k->start);
- end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
- if (start < end)
- if ((*callback)(start + voff, end + voff, arg) < 0)
- return;
- }
-}
-
-/*
- * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
- * has memory that is available for OS use.
- */
-void
-efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
-{
- walk(callback, arg, EFI_MEMORY_WB);
-}
-
-/*
- * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
- * has memory that is available for uncached allocator.
- */
-void
-efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
-{
- walk(callback, arg, EFI_MEMORY_UC);
-}
-
-/*
- * Look for the PAL_CODE region reported by EFI and map it using an
- * ITR to enable safe PAL calls in virtual mode. See the IA-64
- * Processor Abstraction Layer chapter 11 in the ADAG.
- */
-
-#ifdef XEN
-static void *
-__efi_get_pal_addr (void)
-#else
-void *
-efi_get_pal_addr (void)
-#endif
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- int pal_code_count = 0;
- u64 vaddr, mask;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (md->type != EFI_PAL_CODE)
- continue;
-
- if (++pal_code_count > 1) {
- printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
- md->phys_addr);
- continue;
- }
- /*
- * The only ITLB entry in region 7 that is used is the one installed by
- * __start(). That entry covers a 64MB range.
- */
- mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
- vaddr = PAGE_OFFSET + md->phys_addr;
-
- /*
- * We must check that the PAL mapping won't overlap with the kernel
- * mapping.
- *
- * PAL code is guaranteed to be aligned on a power of 2 between 4k and
- * 256KB and that only one ITR is needed to map it. This implies that the
- * PAL code is always aligned on its size, i.e., the closest matching page
- * size supported by the TLB. Therefore PAL code is guaranteed never to
- * cross a 64MB unless it is bigger than 64MB (very unlikely!). So for
-	 * cross a 64MB boundary unless it is bigger than 64MB (very unlikely!). So for
- * dedicated ITR for the PAL code.
- */
- if ((vaddr & mask) == (KERNEL_START & mask)) {
- printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
- __FUNCTION__);
- continue;
- }
-
- if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
- panic("Woah! PAL code size bigger than a granule!");
-
-#if EFI_DEBUG
- mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
-
- printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
- smp_processor_id(), md->phys_addr,
- md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
- vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
-#endif
- return __va_efi(md->phys_addr);
- }
- printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
- __FUNCTION__);
- return NULL;
-}
-
-#ifdef XEN
-static void *pal_vaddr = 0;
-
-void *
-efi_get_pal_addr(void)
-{
- if (!pal_vaddr)
- pal_vaddr = __efi_get_pal_addr();
- return pal_vaddr;
-}
-#endif
-
-#ifdef XEN
-static void
-__efi_unmap_pal_code (void *pal_vaddr)
-{
- ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long)pal_vaddr),
- IA64_GRANULE_SHIFT);
-}
-
-void
-efi_unmap_pal_code (void)
-{
- void *pal_vaddr = efi_get_pal_addr ();
- u64 psr;
-
- if (!pal_vaddr)
- return;
-
- /*
- * Cannot write to CRx with PSR.ic=1
- */
- psr = ia64_clear_ic();
- __efi_unmap_pal_code(pal_vaddr);
- ia64_set_psr(psr); /* restore psr */
- ia64_srlz_i();
-}
-#endif
-
-void
-efi_map_pal_code (void)
-{
- void *pal_vaddr = efi_get_pal_addr ();
- u64 psr;
-
- if (!pal_vaddr)
- return;
-
- /*
- * Cannot write to CRx with PSR.ic=1
- */
- psr = ia64_clear_ic();
-#ifdef XEN
-	/* pal_vaddr must be unpinned before pinning again.
-	 * This is needed in the case of a nested EFI, PAL or SAL call. */
- __efi_unmap_pal_code(pal_vaddr);
-#endif
- ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
- pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
- IA64_GRANULE_SHIFT);
- ia64_set_psr(psr); /* restore psr */
- ia64_srlz_i();
-}
-
-void __init
-efi_init (void)
-{
- void *efi_map_start, *efi_map_end;
- efi_config_table_t *config_tables;
- efi_char16_t *c16;
- u64 efi_desc_size;
- char *cp, vendor[100] = "unknown";
- int i;
-
- /* it's too early to be able to use the standard kernel command line support... */
-#ifdef XEN
- extern char saved_command_line[];
- for (cp = saved_command_line; *cp; ) {
-#else
- for (cp = boot_command_line; *cp; ) {
-#endif
- if (memcmp(cp, "mem=", 4) == 0) {
- mem_limit = memparse(cp + 4, &cp);
- } else if (memcmp(cp, "max_addr=", 9) == 0) {
- max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
- } else if (memcmp(cp, "min_addr=", 9) == 0) {
- min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
- } else {
- while (*cp != ' ' && *cp)
- ++cp;
- while (*cp == ' ')
- ++cp;
- }
- }
- if (min_addr != 0UL)
- printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
- if (max_addr != ~0UL)
- printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
-
- efi.systab = __va(ia64_boot_param->efi_systab);
-
- /*
- * Verify the EFI Table
- */
- if (efi.systab == NULL)
- panic("Woah! Can't find EFI system table.\n");
- if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
- panic("Woah! EFI system table signature incorrect\n");
- if ((efi.systab->hdr.revision >> 16) == 0)
- printk(KERN_WARNING "Warning: EFI system table version "
- "%d.%02d, expected 1.00 or greater\n",
- efi.systab->hdr.revision >> 16,
- efi.systab->hdr.revision & 0xffff);
-
- config_tables = __va(efi.systab->tables);
-
- /* Show what we know for posterity */
- c16 = __va(efi.systab->fw_vendor);
- if (c16) {
-		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
- vendor[i] = *c16++;
- vendor[i] = '\0';
- }
-
- printk(KERN_INFO "EFI v%u.%.02u by %s:",
- efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
-
- efi.mps = EFI_INVALID_TABLE_ADDR;
- efi.acpi = EFI_INVALID_TABLE_ADDR;
- efi.acpi20 = EFI_INVALID_TABLE_ADDR;
- efi.smbios = EFI_INVALID_TABLE_ADDR;
- efi.sal_systab = EFI_INVALID_TABLE_ADDR;
- efi.boot_info = EFI_INVALID_TABLE_ADDR;
- efi.hcdp = EFI_INVALID_TABLE_ADDR;
- efi.uga = EFI_INVALID_TABLE_ADDR;
-
- for (i = 0; i < (int) efi.systab->nr_tables; i++) {
- if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
- efi.mps = config_tables[i].table;
- printk(" MPS=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
- efi.acpi20 = config_tables[i].table;
- printk(" ACPI 2.0=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
- efi.acpi = config_tables[i].table;
- printk(" ACPI=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
- efi.smbios = config_tables[i].table;
- printk(" SMBIOS=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
- efi.sal_systab = config_tables[i].table;
- printk(" SALsystab=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
- efi.hcdp = config_tables[i].table;
- printk(" HCDP=0x%lx", config_tables[i].table);
- }
- }
- printk("\n");
-
- runtime = __va(efi.systab->runtime);
- efi.get_time = phys_get_time;
- efi.set_time = phys_set_time;
- efi.get_wakeup_time = phys_get_wakeup_time;
- efi.set_wakeup_time = phys_set_wakeup_time;
- efi.get_variable = phys_get_variable;
- efi.get_next_variable = phys_get_next_variable;
- efi.set_variable = phys_set_variable;
- efi.get_next_high_mono_count = phys_get_next_high_mono_count;
- efi.reset_system = phys_reset_system;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-#if EFI_DEBUG
- /* print EFI memory map: */
- {
- efi_memory_desc_t *md;
- void *p;
-
- for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
- md = p;
- printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
- i, md->type, md->attribute, md->phys_addr,
- md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
- md->num_pages >> (20 - EFI_PAGE_SHIFT));
- }
- }
-#endif
-
-#ifndef XEN
- efi_map_pal_code();
-#endif
- efi_enter_virtual_mode();
-}
-
-void
-efi_enter_virtual_mode (void)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- efi_status_t status;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-#ifdef XEN
- if (md->attribute & EFI_MEMORY_RUNTIME) {
- if (md->attribute & EFI_MEMORY_WB)
- md->virt_addr = __IA64_EFI_CACHED_OFFSET|
- md->phys_addr;
- else if (md->attribute & (EFI_MEMORY_UC|EFI_MEMORY_WC|
- EFI_MEMORY_WT))
- md->virt_addr = __IA64_EFI_UNCACHED_OFFSET|
- md->phys_addr;
- }
-#else
- if (md->attribute & EFI_MEMORY_RUNTIME) {
- /*
- * Some descriptors have multiple bits set, so the order of
- * the tests is relevant.
- */
- if (md->attribute & EFI_MEMORY_WB) {
- md->virt_addr = (u64) __va(md->phys_addr);
- } else if (md->attribute & EFI_MEMORY_UC) {
- md->virt_addr = (u64) ioremap(md->phys_addr, 0);
- } else if (md->attribute & EFI_MEMORY_WC) {
-#if 0
- md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
- | _PAGE_D
- | _PAGE_MA_WC
- | _PAGE_PL_0
- | _PAGE_AR_RW));
-#else
- printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
- md->virt_addr = (u64) ioremap(md->phys_addr, 0);
-#endif
- } else if (md->attribute & EFI_MEMORY_WT) {
-#if 0
- md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
- | _PAGE_D | _PAGE_MA_WT
- | _PAGE_PL_0
- | _PAGE_AR_RW));
-#else
- printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
- md->virt_addr = (u64) ioremap(md->phys_addr, 0);
-#endif
- }
- }
-#endif
- }
-
- status = efi_call_phys(__va(runtime->set_virtual_address_map),
- ia64_boot_param->efi_memmap_size,
- efi_desc_size, ia64_boot_param->efi_memdesc_version,
- ia64_boot_param->efi_memmap);
- if (status != EFI_SUCCESS) {
- printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
- "(status=%lu)\n", status);
- return;
- }
-
- /*
- * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
- */
- efi.get_time = virt_get_time;
- efi.set_time = virt_set_time;
- efi.get_wakeup_time = virt_get_wakeup_time;
- efi.set_wakeup_time = virt_set_wakeup_time;
- efi.get_variable = virt_get_variable;
- efi.get_next_variable = virt_get_next_variable;
- efi.set_variable = virt_set_variable;
- efi.get_next_high_mono_count = virt_get_next_high_mono_count;
- efi.reset_system = virt_reset_system;
-}
-
-/*
- * Walk the EFI memory map looking for the I/O port range. There can only be one entry of
- * this type; other I/O port ranges should be described via ACPI.
- */
-u64
-efi_get_iobase (void)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
- if (md->attribute & EFI_MEMORY_UC)
- return md->phys_addr;
- }
- }
- return 0;
-}
-
-static struct kern_memdesc *
-kern_memory_descriptor (unsigned long phys_addr)
-{
- struct kern_memdesc *md;
-
- for (md = kern_memmap; md->start != ~0UL; md++) {
- if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
- return md;
- }
- return NULL;
-}
-
-static efi_memory_desc_t *
-efi_memory_descriptor (unsigned long phys_addr)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-
- if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
- return md;
- }
- return NULL;
-}
-
-u32
-efi_mem_type (unsigned long phys_addr)
-{
- efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
-
- if (md)
- return md->type;
- return 0;
-}
-
-u64
-efi_mem_attributes (unsigned long phys_addr)
-{
- efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
-
- if (md)
- return md->attribute;
- return 0;
-}
-EXPORT_SYMBOL(efi_mem_attributes);
-
-u64
-efi_mem_attribute (unsigned long phys_addr, unsigned long size)
-{
- unsigned long end = phys_addr + size;
- efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
- u64 attr;
-
- if (!md)
- return 0;
-
- /*
- * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
- * the kernel that firmware needs this region mapped.
- */
- attr = md->attribute & ~EFI_MEMORY_RUNTIME;
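-	/* walk contiguous descriptors until [phys_addr, end) is covered */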
- do {
- unsigned long md_end = efi_md_end(md);
-
- if (end <= md_end)
- return attr;
-
- md = efi_memory_descriptor(md_end);
- if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
- return 0;
- } while (md);
- return 0;
-}
-
-u64
-kern_mem_attribute (unsigned long phys_addr, unsigned long size)
-{
- unsigned long end = phys_addr + size;
- struct kern_memdesc *md;
- u64 attr;
-
- /*
- * This is a hack for ioremap calls before we set up kern_memmap.
- * Maybe we should do efi_memmap_init() earlier instead.
- */
- if (!kern_memmap) {
- attr = efi_mem_attribute(phys_addr, size);
- if (attr & EFI_MEMORY_WB)
- return EFI_MEMORY_WB;
- return 0;
- }
-
- md = kern_memory_descriptor(phys_addr);
- if (!md)
- return 0;
-
- attr = md->attribute;
- do {
- unsigned long md_end = kmd_end(md);
-
- if (end <= md_end)
- return attr;
-
- md = kern_memory_descriptor(md_end);
- if (!md || md->attribute != attr)
- return 0;
- } while (md);
- return 0;
-}
-EXPORT_SYMBOL(kern_mem_attribute);
-
-#ifndef XEN
-int
-valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
-{
- u64 attr;
-
- /*
- * /dev/mem reads and writes use copy_to_user(), which implicitly
- * uses a granule-sized kernel identity mapping. It's really
- * only safe to do this for regions in kern_memmap. For more
- * details, see Documentation/ia64/aliasing.txt.
- */
- attr = kern_mem_attribute(phys_addr, size);
- if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
- return 1;
- return 0;
-}
-
-int
-valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
-{
- /*
- * MMIO regions are often missing from the EFI memory map.
- * We must allow mmap of them for programs like X, so we
- * currently can't do any useful validation.
- */
- return 1;
-}
-
-pgprot_t
-phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
- pgprot_t vma_prot)
-{
- unsigned long phys_addr = pfn << PAGE_SHIFT;
- u64 attr;
-
- /*
- * For /dev/mem mmap, we use user mappings, but if the region is
- * in kern_memmap (and hence may be covered by a kernel mapping),
- * we must use the same attribute as the kernel mapping.
- */
- attr = kern_mem_attribute(phys_addr, size);
- if (attr & EFI_MEMORY_WB)
- return pgprot_cacheable(vma_prot);
- else if (attr & EFI_MEMORY_UC)
- return pgprot_noncached(vma_prot);
-
- /*
- * Some chipsets don't support UC access to memory. If
- * WB is supported, we prefer that.
- */
- if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
- return pgprot_cacheable(vma_prot);
-
- return pgprot_noncached(vma_prot);
-}
-#endif
-
-int __init
-efi_uart_console_only(void)
-{
- efi_status_t status;
- char *s, name[] = "ConOut";
- efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
- efi_char16_t *utf16, name_utf16[32];
- unsigned char data[1024];
- unsigned long size = sizeof(data);
- struct efi_generic_dev_path *hdr, *end_addr;
- int uart = 0;
-
- /* Convert to UTF-16 */
- utf16 = name_utf16;
- s = name;
- while (*s)
- *utf16++ = *s++ & 0x7f;
- *utf16 = 0;
-
- status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
- if (status != EFI_SUCCESS) {
- printk(KERN_ERR "No EFI %s variable?\n", name);
- return 0;
- }
-
- hdr = (struct efi_generic_dev_path *) data;
- end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
- while (hdr < end_addr) {
- if (hdr->type == EFI_DEV_MSG &&
- hdr->sub_type == EFI_DEV_MSG_UART)
- uart = 1;
- else if (hdr->type == EFI_DEV_END_PATH ||
- hdr->type == EFI_DEV_END_PATH2) {
- if (!uart)
- return 0;
- if (hdr->sub_type == EFI_DEV_END_ENTIRE)
- return 1;
- uart = 0;
- }
- hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
- }
- printk(KERN_ERR "Malformed %s value\n", name);
- return 0;
-}
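The walk above treats the ConOut value as a packed sequence of device-path nodes and trusts a UART node only if the path terminates with an end-entire marker. For reference, the node layout it relies on is the three-field generic header (this matches the declaration in the Linux EFI headers):

struct efi_generic_dev_path {
	u8  type;	/* e.g. EFI_DEV_MSG, EFI_DEV_END_PATH */
	u8  sub_type;	/* e.g. EFI_DEV_MSG_UART, EFI_DEV_END_ENTIRE */
	u16 length;	/* total node size in bytes, header included;
			   advancing by it yields the next node */
} __attribute__((packed));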
-
-/*
- * Look for the first granule-aligned memory descriptor that is big
- * enough to hold the EFI memory map. Make sure this descriptor is at
- * least granule sized so it does not get trimmed.
- */
-struct kern_memdesc *
-find_memmap_space (void)
-{
- u64 contig_low=0, contig_high=0;
- u64 as = 0, ae;
- void *efi_map_start, *efi_map_end, *p, *q;
- efi_memory_desc_t *md, *pmd = NULL, *check_md;
- u64 space_needed, efi_desc_size;
- unsigned long total_mem = 0;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- /*
- * Worst case: we need 3 kernel descriptors for each efi descriptor
- * (if every entry has a WB part in the middle, and UC head and tail),
- * plus one for the end marker.
- */
- space_needed = sizeof(kern_memdesc_t) *
- (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);
-
- for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
- md = p;
- if (!efi_wb(md)) {
- continue;
- }
- if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
- contig_low = GRANULEROUNDUP(md->phys_addr);
- contig_high = efi_md_end(md);
- for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
- check_md = q;
- if (!efi_wb(check_md))
- break;
- if (contig_high != check_md->phys_addr)
- break;
- contig_high = efi_md_end(check_md);
- }
- contig_high = GRANULEROUNDDOWN(contig_high);
- }
- if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
- continue;
-
- /* Round ends inward to granule boundaries */
- as = max(contig_low, md->phys_addr);
- ae = min(contig_high, efi_md_end(md));
-
- /* keep within max_addr= and min_addr= command line arg */
- as = max(as, min_addr);
- ae = min(ae, max_addr);
- if (ae <= as)
- continue;
-
- /* avoid going over mem= command line arg */
- if (total_mem + (ae - as) > mem_limit)
- ae -= total_mem + (ae - as) - mem_limit;
-
- if (ae <= as)
- continue;
-
- if (ae - as > space_needed)
- break;
- }
- if (p >= efi_map_end)
- panic("Can't allocate space for kernel memory descriptors");
-
- return __va(as);
-}
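To make the worst-case estimate above concrete, here is the arithmetic with illustrative numbers (a 4800-byte map of 48-byte descriptors; neither value comes from this file):

/* 100 EFI descriptors; each may split into a UC head, a WB middle
 * and a UC tail, plus one slot for the ~0UL end marker. */
u64 space_needed = sizeof(kern_memdesc_t) * (3 * (4800 / 48) + 1);
		/* = 301 * sizeof(kern_memdesc_t) */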
-
-/*
- * Walk the EFI memory map and gather all memory available for the
- * kernel to use. We can allocate partial granules only if the
- * unavailable parts exist and are WB.
- */
-void
-efi_memmap_init(unsigned long *s, unsigned long *e)
-{
- struct kern_memdesc *k, *prev = NULL;
- u64 contig_low=0, contig_high=0;
- u64 as, ae, lim;
- void *efi_map_start, *efi_map_end, *p, *q;
- efi_memory_desc_t *md, *pmd = NULL, *check_md;
- u64 efi_desc_size;
- unsigned long total_mem = 0;
-
- k = kern_memmap = find_memmap_space();
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
- md = p;
- if (!efi_wb(md)) {
- if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
- md->type == EFI_BOOT_SERVICES_DATA)) {
- k->attribute = EFI_MEMORY_UC;
- k->start = md->phys_addr;
- k->num_pages = md->num_pages;
- k++;
- }
- continue;
- }
-#ifdef XEN
- /* this works around a problem in the ski bootloader */
- if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
- continue;
-#endif
- if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
- contig_low = GRANULEROUNDUP(md->phys_addr);
- contig_high = efi_md_end(md);
- for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
- check_md = q;
- if (!efi_wb(check_md))
- break;
- if (contig_high != check_md->phys_addr)
- break;
- contig_high = efi_md_end(check_md);
- }
- contig_high = GRANULEROUNDDOWN(contig_high);
- }
- if (!is_memory_available(md))
- continue;
-
-#ifdef CONFIG_CRASH_DUMP
- /* saved_max_pfn should ignore max_addr= command line arg */
- if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
- saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
-#endif
- /*
- * Round ends inward to granule boundaries
- * Give trimmings to uncached allocator
- */
- if (md->phys_addr < contig_low) {
- lim = min(efi_md_end(md), contig_low);
- if (efi_uc(md)) {
- if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
- kmd_end(k-1) == md->phys_addr) {
- (k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
- } else {
- k->attribute = EFI_MEMORY_UC;
- k->start = md->phys_addr;
- k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
- k++;
- }
- }
- as = contig_low;
- } else
- as = md->phys_addr;
-
- if (efi_md_end(md) > contig_high) {
- lim = max(md->phys_addr, contig_high);
- if (efi_uc(md)) {
- if (lim == md->phys_addr && k > kern_memmap &&
- (k-1)->attribute == EFI_MEMORY_UC &&
- kmd_end(k-1) == md->phys_addr) {
- (k-1)->num_pages += md->num_pages;
- } else {
- k->attribute = EFI_MEMORY_UC;
- k->start = lim;
- k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
- k++;
- }
- }
- ae = contig_high;
- } else
- ae = efi_md_end(md);
-
- /* keep within max_addr= and min_addr= command line arg */
- as = max(as, min_addr);
- ae = min(ae, max_addr);
- if (ae <= as)
- continue;
-
- /* avoid going over mem= command line arg */
- if (total_mem + (ae - as) > mem_limit)
- ae -= total_mem + (ae - as) - mem_limit;
-
- if (ae <= as)
- continue;
- if (prev && kmd_end(prev) == md->phys_addr) {
- prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
- total_mem += ae - as;
- continue;
- }
- k->attribute = EFI_MEMORY_WB;
- k->start = as;
- k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
- total_mem += ae - as;
- prev = k++;
- }
- k->start = ~0L; /* end-marker */
-
- /* reserve the memory we are using for kern_memmap */
- *s = (u64)kern_memmap;
- *e = (u64)++k;
-}
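The granule rounding in both loops trims each contiguous WB run inward before it is granted to the kernel. A worked example, assuming a 16MB granule (the granule size is configurable, so the numbers are illustrative):

/* WB run [0x00f00000, 0x04100000) with a 16MB (1 << 24) granule: */
u64 contig_low  = GRANULEROUNDUP(0x00f00000UL);   /* -> 0x01000000 */
u64 contig_high = GRANULEROUNDDOWN(0x04100000UL); /* -> 0x04000000 */
/* The trimmed 1MB head and tail are not lost: when their descriptor
 * is UC, the loop records them as EFI_MEMORY_UC kern_memdesc entries
 * for the uncached allocator. */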
-
-#ifndef XEN
-void
-efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource)
-{
- struct resource *res;
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- char *name;
- unsigned long flags;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- res = NULL;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-
- if (md->num_pages == 0) /* should not happen */
- continue;
-
- flags = IORESOURCE_MEM;
- switch (md->type) {
-
- case EFI_MEMORY_MAPPED_IO:
- case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
- continue;
-
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_BOOT_SERVICES_DATA:
- case EFI_BOOT_SERVICES_CODE:
- case EFI_CONVENTIONAL_MEMORY:
- if (md->attribute & EFI_MEMORY_WP) {
- name = "System ROM";
- flags |= IORESOURCE_READONLY;
- } else {
- name = "System RAM";
- }
- break;
-
- case EFI_ACPI_MEMORY_NVS:
- name = "ACPI Non-volatile Storage";
- flags |= IORESOURCE_BUSY;
- break;
-
- case EFI_UNUSABLE_MEMORY:
- name = "reserved";
- flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
- break;
-
- case EFI_RESERVED_TYPE:
- case EFI_RUNTIME_SERVICES_CODE:
- case EFI_RUNTIME_SERVICES_DATA:
- case EFI_ACPI_RECLAIM_MEMORY:
- default:
- name = "reserved";
- flags |= IORESOURCE_BUSY;
- break;
- }
-
- if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "failed to alocate resource for iomem\n");
- return;
- }
-
- res->name = name;
- res->start = md->phys_addr;
- res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
- res->flags = flags;
-
- if (insert_resource(&iomem_resource, res) < 0)
- kfree(res);
- else {
- /*
- * We don't know which region contains
- * kernel data so we try it repeatedly and
- * let the resource manager test it.
- */
- insert_resource(res, code_resource);
- insert_resource(res, data_resource);
-#ifdef CONFIG_KEXEC
- insert_resource(res, &efi_memmap_res);
- insert_resource(res, &boot_param_res);
- if (crashk_res.end > crashk_res.start)
- insert_resource(res, &crashk_res);
-#endif
- }
- }
-}
-#endif /* XEN */
-
-#if defined(CONFIG_KEXEC) || defined(XEN)
-/* Find a block of memory aligned to 64M, excluding the reserved
-   regions; rsvd_regions are sorted.
- */
-unsigned long __init
-kdump_find_rsvd_region (unsigned long size,
- struct rsvd_region *r, int n)
-{
- int i;
- u64 start, end;
- u64 alignment = 1UL << _PAGE_SIZE_64M;
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (!efi_wb(md))
- continue;
- start = ALIGN(md->phys_addr, alignment);
- end = efi_md_end(md);
- for (i = 0; i < n; i++) {
- if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
- if (__pa(r[i].start) > start + size)
- return start;
- start = ALIGN(__pa(r[i].end), alignment);
- if (i < n-1 && __pa(r[i+1].start) < start + size)
- continue;
- else
- break;
- }
- }
- if (end > start + size)
- return start;
- }
-
- printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n",
- size);
- return ~0UL;
-}
-#endif
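The 64M alignment above comes from _PAGE_SIZE_64M, which is a log2 page-size code. A quick worked example of the rounding (the addresses are illustrative):

u64 alignment = 1UL << 26;		    /* _PAGE_SIZE_64M == 26 -> 64MB */
u64 start = ALIGN(0x04100000UL, alignment); /* 65MB rounds up to 0x08000000 */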
-
-#ifndef XEN
-#ifdef CONFIG_PROC_VMCORE
-/* locate the size of the descriptor at a certain address */
-unsigned long
-vmcore_find_descriptor_size (unsigned long address)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- unsigned long ret = 0;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (efi_wb(md) && md->type == EFI_LOADER_DATA
- && md->phys_addr == address) {
- ret = efi_md_size(md);
- break;
- }
- }
-
- if (ret == 0)
- printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");
-
- return ret;
-}
-#endif
-#endif /* XEN */
diff --git a/xen/arch/ia64/linux-xen/entry.S b/xen/arch/ia64/linux-xen/entry.S
deleted file mode 100644
index 99a0b43200..0000000000
--- a/xen/arch/ia64/linux-xen/entry.S
+++ /dev/null
@@ -1,1851 +0,0 @@
-/*
- * ia64/kernel/entry.S
- *
- * Kernel entry points.
- *
- * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999, 2002-2003
- * Asit Mallick <Asit.K.Mallick@intel.com>
- * Don Dugger <Don.Dugger@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Fenghua Yu <fenghua.yu@intel.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- */
-/*
- * ia64_switch_to now places the correct virtual mapping in TR2 for the
- * kernel stack. This allows us to handle interrupts without changing
- * to physical mode.
- *
- * Jonathan Nicklin <nicklin@missioncriticallinux.com>
- * Patrick O'Rourke <orourke@missioncriticallinux.com>
- * 11/07/2000
- */
-/*
- * Global (preserved) predicate usage on syscall entry/exit path:
- *
- * pKStk: See entry.h.
- * pUStk: See entry.h.
- * pSys: See entry.h.
- * pNonSys: !pSys
- */
-
-#include <linux/config.h>
-
-#include <asm/asmmacro.h>
-#include <asm/cache.h>
-#ifdef XEN
-#include <xen/errno.h>
-#else
-#include <asm/errno.h>
-#endif
-#include <asm/kregs.h>
-#include <asm/offsets.h>
-#include <asm/pgtable.h>
-#include <asm/percpu.h>
-#include <asm/processor.h>
-#include <asm/thread_info.h>
-#include <asm/unistd.h>
-
-#include "minstate.h"
-
-#ifndef XEN
- /*
- * execve() is special because in case of success, we need to
- * setup a null register window frame.
- */
-ENTRY(ia64_execve)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,4,0
- mov loc0=rp
- .body
- mov out0=in0 // filename
- ;; // stop bit between alloc and call
- mov out1=in1 // argv
- mov out2=in2 // envp
- add out3=16,sp // regs
- br.call.sptk.many rp=sys_execve
-.ret0:
-#ifdef CONFIG_IA32_SUPPORT
- /*
- * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
- * from pt_regs.
- */
- adds r16=PT(CR_IPSR)+16,sp
- ;;
- ld8 r16=[r16]
-#endif
- cmp4.ge p6,p7=r8,r0
- mov ar.pfs=loc1 // restore ar.pfs
- sxt4 r8=r8 // return 64-bit result
- ;;
- stf.spill [sp]=f0
-(p6) cmp.ne pKStk,pUStk=r0,r0 // a successful execve() lands us in user-mode...
- mov rp=loc0
-(p6) mov ar.pfs=r0 // clear ar.pfs on success
-(p7) br.ret.sptk.many rp
-
- /*
- * In theory, we'd have to zap this state only to prevent leaking of
- * security-sensitive state (e.g., if current->mm->dumpable is zero). However,
- * this executes in less than 20 cycles even on Itanium, so it's not worth
- * optimizing for...
- */
- mov ar.unat=0; mov ar.lc=0
- mov r4=0; mov f2=f0; mov b1=r0
- mov r5=0; mov f3=f0; mov b2=r0
- mov r6=0; mov f4=f0; mov b3=r0
- mov r7=0; mov f5=f0; mov b4=r0
- ldf.fill f12=[sp]; mov f13=f0; mov b5=r0
- ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0
- ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0
- ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0
- ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
- ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
- ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
-#ifdef CONFIG_IA32_SUPPORT
- tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
- movl loc0=ia64_ret_from_ia32_execve
- ;;
-(p6) mov rp=loc0
-#endif
- br.ret.sptk.many rp
-END(ia64_execve)
-
-/*
- * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
- * u64 tls)
- */
-GLOBAL_ENTRY(sys_clone2)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc r16=ar.pfs,8,2,6,0
- DO_SAVE_SWITCH_STACK
- adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
- mov loc0=rp
- mov loc1=r16 // save ar.pfs across do_fork
- .body
- mov out1=in1
- mov out3=in2
- tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
- mov out4=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
- ;;
-(p6) st8 [r2]=in5 // store TLS in r16 for copy_thread()
- mov out5=in4 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
- adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
- mov out0=in0 // out0 = clone_flags
- br.call.sptk.many rp=do_fork
-.ret1: .restore sp
- adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(sys_clone2)
-
-/*
- * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
- * Deprecated. Use sys_clone2() instead.
- */
-GLOBAL_ENTRY(sys_clone)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc r16=ar.pfs,8,2,6,0
- DO_SAVE_SWITCH_STACK
- adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
- mov loc0=rp
- mov loc1=r16 // save ar.pfs across do_fork
- .body
- mov out1=in1
- mov out3=16 // stacksize (compensates for 16-byte scratch area)
- tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
- mov out4=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
- ;;
-(p6) st8 [r2]=in4 // store TLS in r13 (tp)
- mov out5=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
- adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
- mov out0=in0 // out0 = clone_flags
- br.call.sptk.many rp=do_fork
-.ret2: .restore sp
- adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(sys_clone)
-#endif
-
-/*
- * prev_task <- ia64_switch_to(struct task_struct *next)
- * With Ingo's new scheduler, interrupts are disabled when this routine gets
- * called. The code starting at .map relies on this. The rest of the code
- * doesn't care about the interrupt masking status.
- */
-GLOBAL_ENTRY(ia64_switch_to)
- .prologue
- alloc r16=ar.pfs,1,0,0,0
- DO_SAVE_SWITCH_STACK
- .body
-
- adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
-#ifdef XEN
- movl r24=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
- ld8 r27=[r24]
- adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
- dep r20=0,in0,60,4 // physical address of "next"
-#else
- movl r25=init_task
- mov r27=IA64_KR(CURRENT_STACK)
- adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
- dep r20=0,in0,61,3 // physical address of "next"
-#endif
- ;;
- st8 [r22]=sp // save kernel stack pointer of old task
- shr.u r26=r20,IA64_GRANULE_SHIFT
-#ifdef XEN
- ;;
- /*
- * If we've already mapped this task's page, we can skip doing it again.
- */
- cmp.eq p7,p6=r26,r27
-(p6) br.cond.dpnt .map
-#else
- cmp.eq p7,p6=r25,in0
- ;;
- /*
- * If we've already mapped this task's page, we can skip doing it again.
- */
-(p6) cmp.eq p7,p6=r26,r27
-(p6) br.cond.dpnt .map
-#endif
- ;;
-.done:
-(p6) ssm psr.ic // if we had to map, reenable the psr.ic bit FIRST!!!
- ;;
-(p6) srlz.d
- ld8 sp=[r21] // load kernel stack pointer of new task
-#ifdef XEN
- add r25=IA64_KR_CURRENT_OFFSET-IA64_KR_CURRENT_STACK_OFFSET,r24
- ;;
- st8 [r25]=in0 // update "current" application register
- ;;
- bsw.0
- ;;
- mov r8=r13 // return pointer to previously running task
- mov r13=in0 // set "current" pointer
- mov r21=in0
- ;;
- bsw.1
- ;;
-#else
- mov IA64_KR(CURRENT)=in0 // update "current" application register
- mov r8=r13 // return pointer to previously running task
- mov r13=in0 // set "current" pointer
-#endif
- DO_LOAD_SWITCH_STACK
-
-#ifdef CONFIG_SMP
- sync.i // ensure "fc"s done by this CPU are visible on other CPUs
-#endif
- br.ret.sptk.many rp // boogie on out in new context
-
-.map:
- rsm psr.ic // interrupts (psr.i) are already disabled here
- movl r25=PAGE_KERNEL
-#ifdef XEN
- movl r27=IA64_GRANULE_SHIFT << 2
-#endif
- ;;
- srlz.d
- or r23=r25,r20 // construct PA | page properties
-#ifdef XEN
- ptr.d in0,r27 // to purge dtr[IA64_TR_VHPT] and dtr[IA64_TR_VPD]
-#else
- movl r27=IA64_GRANULE_SHIFT << 2
-#endif
- ;;
- mov cr.itir=r27
- mov cr.ifa=in0 // VA of next task...
-#ifdef XEN
- srlz.d
-#endif
- ;;
- mov r25=IA64_TR_CURRENT_STACK
-#ifdef XEN
- st8 [r24]=r26 // remember last page we mapped...
-#else
- mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped...
-#endif
- ;;
- itr.d dtr[r25]=r23 // wire in new mapping...
- br.cond.sptk .done
-END(ia64_switch_to)
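Expressed in C, the .map path above amounts to the following sketch. All helper names here (va_to_pa, purge_dtr, insert_dtr, psr_ic_off/psr_ic_on) are invented for illustration; the ptr.d/itr.d instructions have no real C equivalents:

/* Conceptual sketch only: pin the new task's stack granule in the
 * data TR so interrupts can be taken without leaving virtual mode. */
void map_task_stack(struct task_struct *next)
{
	u64 pa  = va_to_pa(next);	/* dep r20=0,in0,...           */
	u64 pte = PAGE_KERNEL | pa;	/* or  r23=r25,r20             */

	psr_ic_off();			/* rsm psr.ic                  */
	purge_dtr((u64)next, IA64_GRANULE_SHIFT);  /* ptr.d (XEN)     */
	insert_dtr(IA64_TR_CURRENT_STACK, (u64)next, pte,
		   IA64_GRANULE_SHIFT);	/* mov cr.itir/cr.ifa; itr.d   */
	psr_ic_on();			/* ssm psr.ic at .done         */
}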
-
-/*
- * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
- * means that we may get an interrupt with "sp" pointing to the new kernel stack while
- * ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc,
- * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
- * problem. Also, we don't need to specify unwind information for preserved registers
- * that are not modified in save_switch_stack as the right unwind information is already
- * specified at the call-site of save_switch_stack.
- */
-
-/*
- * save_switch_stack:
- * - r16 holds ar.pfs
- * - b7 holds address to return to
- * - rp (b0) holds return address to save
- */
-GLOBAL_ENTRY(save_switch_stack)
- .prologue
- .altrp b7
- flushrs // flush dirty regs to backing store (must be first in insn group)
- .save @priunat,r17
- mov r17=ar.unat // preserve caller's
- .body
-#ifdef CONFIG_ITANIUM
- adds r2=16+128,sp
- adds r3=16+64,sp
- adds r14=SW(R4)+16,sp
- ;;
- st8.spill [r14]=r4,16 // spill r4
- lfetch.fault.excl.nt1 [r3],128
- ;;
- lfetch.fault.excl.nt1 [r2],128
- lfetch.fault.excl.nt1 [r3],128
- ;;
- lfetch.fault.excl [r2]
- lfetch.fault.excl [r3]
- adds r15=SW(R5)+16,sp
-#else
- add r2=16+3*128,sp
- add r3=16,sp
- add r14=SW(R4)+16,sp
- ;;
- st8.spill [r14]=r4,SW(R6)-SW(R4) // spill r4 and prefetch offset 0x1c0
- lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x010
- ;;
- lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x090
- lfetch.fault.excl.nt1 [r2],128 // prefetch offset 0x190
- ;;
- lfetch.fault.excl.nt1 [r3] // prefetch offset 0x110
- lfetch.fault.excl.nt1 [r2] // prefetch offset 0x210
- adds r15=SW(R5)+16,sp
-#endif
- ;;
- st8.spill [r15]=r5,SW(R7)-SW(R5) // spill r5
- mov.m ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0
- add r2=SW(F2)+16,sp // r2 = &sw->f2
- ;;
- st8.spill [r14]=r6,SW(B0)-SW(R6) // spill r6
- mov.m r18=ar.fpsr // preserve fpsr
- add r3=SW(F3)+16,sp // r3 = &sw->f3
- ;;
- stf.spill [r2]=f2,32
- mov.m r19=ar.rnat
- mov r21=b0
-
- stf.spill [r3]=f3,32
- st8.spill [r15]=r7,SW(B2)-SW(R7) // spill r7
- mov r22=b1
- ;;
- // since we're done with the spills, read and save ar.unat:
- mov.m r29=ar.unat
- mov.m r20=ar.bspstore
- mov r23=b2
- stf.spill [r2]=f4,32
- stf.spill [r3]=f5,32
- mov r24=b3
- ;;
- st8 [r14]=r21,SW(B1)-SW(B0) // save b0
- st8 [r15]=r23,SW(B3)-SW(B2) // save b2
- mov r25=b4
- mov r26=b5
- ;;
- st8 [r14]=r22,SW(B4)-SW(B1) // save b1
- st8 [r15]=r24,SW(AR_PFS)-SW(B3) // save b3
- mov r21=ar.lc // I-unit
- stf.spill [r2]=f12,32
- stf.spill [r3]=f13,32
- ;;
- st8 [r14]=r25,SW(B5)-SW(B4) // save b4
- st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs
- stf.spill [r2]=f14,32
- stf.spill [r3]=f15,32
- ;;
- st8 [r14]=r26 // save b5
- st8 [r15]=r21 // save ar.lc
- stf.spill [r2]=f16,32
- stf.spill [r3]=f17,32
- ;;
- stf.spill [r2]=f18,32
- stf.spill [r3]=f19,32
- ;;
- stf.spill [r2]=f20,32
- stf.spill [r3]=f21,32
- ;;
- stf.spill [r2]=f22,32
- stf.spill [r3]=f23,32
- ;;
- stf.spill [r2]=f24,32
- stf.spill [r3]=f25,32
- ;;
- stf.spill [r2]=f26,32
- stf.spill [r3]=f27,32
- ;;
- stf.spill [r2]=f28,32
- stf.spill [r3]=f29,32
- ;;
- stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
- stf.spill [r3]=f31,SW(PR)-SW(F31)
- add r14=SW(CALLER_UNAT)+16,sp
- ;;
- st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat
- st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
- mov r21=pr
- ;;
- st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
- st8 [r3]=r21 // save predicate registers
- ;;
- st8 [r2]=r20 // save ar.bspstore
- st8 [r14]=r18 // save fpsr
- mov ar.rsc=3 // put RSE back into eager mode, pl 0
- br.cond.sptk.many b7
-END(save_switch_stack)
-
-/*
- * load_switch_stack:
- * - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
- * - b7 holds address to return to
- * - must not touch r8-r11
- */
-#ifdef XEN
-GLOBAL_ENTRY(load_switch_stack)
-#else
-ENTRY(load_switch_stack)
-#endif
- .prologue
- .altrp b7
-
- .body
- lfetch.fault.nt1 [sp]
- adds r2=SW(AR_BSPSTORE)+16,sp
- adds r3=SW(AR_UNAT)+16,sp
- mov ar.rsc=0 // put RSE into enforced lazy mode
- adds r14=SW(CALLER_UNAT)+16,sp
- adds r15=SW(AR_FPSR)+16,sp
- ;;
- ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE)) // bspstore
- ld8 r29=[r3],(SW(B1)-SW(AR_UNAT)) // unat
- ;;
- ld8 r21=[r2],16 // restore b0
- ld8 r22=[r3],16 // restore b1
- ;;
- ld8 r23=[r2],16 // restore b2
- ld8 r24=[r3],16 // restore b3
- ;;
- ld8 r25=[r2],16 // restore b4
- ld8 r26=[r3],16 // restore b5
- ;;
- ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs
- ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc
- ;;
- ld8 r28=[r2] // restore pr
- ld8 r30=[r3] // restore rnat
- ;;
- ld8 r18=[r14],16 // restore caller's unat
- ld8 r19=[r15],24 // restore fpsr
- ;;
- ldf.fill f2=[r14],32
- ldf.fill f3=[r15],32
- ;;
- ldf.fill f4=[r14],32
- ldf.fill f5=[r15],32
- ;;
- ldf.fill f12=[r14],32
- ldf.fill f13=[r15],32
- ;;
- ldf.fill f14=[r14],32
- ldf.fill f15=[r15],32
- ;;
- ldf.fill f16=[r14],32
- ldf.fill f17=[r15],32
- ;;
- ldf.fill f18=[r14],32
- ldf.fill f19=[r15],32
- mov b0=r21
- ;;
- ldf.fill f20=[r14],32
- ldf.fill f21=[r15],32
- mov b1=r22
- ;;
- ldf.fill f22=[r14],32
- ldf.fill f23=[r15],32
- mov b2=r23
- ;;
- mov ar.bspstore=r27
- mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7
- mov b3=r24
- ;;
- ldf.fill f24=[r14],32
- ldf.fill f25=[r15],32
- mov b4=r25
- ;;
- ldf.fill f26=[r14],32
- ldf.fill f27=[r15],32
- mov b5=r26
- ;;
- ldf.fill f28=[r14],32
- ldf.fill f29=[r15],32
- mov ar.pfs=r16
- ;;
- ldf.fill f30=[r14],32
- ldf.fill f31=[r15],24
- mov ar.lc=r17
- ;;
- ld8.fill r4=[r14],16
- ld8.fill r5=[r15],16
- mov pr=r28,-1
- ;;
- ld8.fill r6=[r14],16
- ld8.fill r7=[r15],16
-
- mov ar.unat=r18 // restore caller's unat
- mov ar.rnat=r30 // must restore after bspstore but before rsc!
- mov ar.fpsr=r19 // restore fpsr
- mov ar.rsc=3 // put RSE back into eager mode, pl 0
- br.cond.sptk.many b7
-END(load_switch_stack)
-
-#ifndef XEN
-GLOBAL_ENTRY(execve)
- mov r15=__NR_execve // put syscall number in place
- break __BREAK_SYSCALL
- br.ret.sptk.many rp
-END(execve)
-
-GLOBAL_ENTRY(clone)
- mov r15=__NR_clone // put syscall number in place
- break __BREAK_SYSCALL
- br.ret.sptk.many rp
-END(clone)
-
- /*
- * Invoke a system call, but do some tracing before and after the call.
- * We MUST preserve the current register frame throughout this routine
- * because some system calls (such as ia64_execve) directly
- * manipulate ar.pfs.
- */
-GLOBAL_ENTRY(ia64_trace_syscall)
- PT_REGS_UNWIND_INFO(0)
- /*
- * We need to preserve the scratch registers f6-f11 in case the system
- * call is sigreturn.
- */
- adds r16=PT(F6)+16,sp
- adds r17=PT(F7)+16,sp
- ;;
- stf.spill [r16]=f6,32
- stf.spill [r17]=f7,32
- ;;
- stf.spill [r16]=f8,32
- stf.spill [r17]=f9,32
- ;;
- stf.spill [r16]=f10
- stf.spill [r17]=f11
- br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
- adds r16=PT(F6)+16,sp
- adds r17=PT(F7)+16,sp
- ;;
- ldf.fill f6=[r16],32
- ldf.fill f7=[r17],32
- ;;
- ldf.fill f8=[r16],32
- ldf.fill f9=[r17],32
- ;;
- ldf.fill f10=[r16]
- ldf.fill f11=[r17]
- // the syscall number may have changed, so re-load it and re-calculate the
- // syscall entry-point:
- adds r15=PT(R15)+16,sp // r15 = &pt_regs.r15 (syscall #)
- ;;
- ld8 r15=[r15]
- mov r3=NR_syscalls - 1
- ;;
- adds r15=-1024,r15
- movl r16=sys_call_table
- ;;
- shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
- cmp.leu p6,p7=r15,r3
- ;;
-(p6) ld8 r20=[r20] // load address of syscall entry point
-(p7) movl r20=sys_ni_syscall
- ;;
- mov b6=r20
- br.call.sptk.many rp=b6 // do the syscall
-.strace_check_retval:
- cmp.lt p6,p0=r8,r0 // syscall failed?
- adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
- adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
- mov r10=0
-(p6) br.cond.sptk strace_error // syscall failed ->
- ;; // avoid RAW on r10
-.strace_save_retval:
-.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
-.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
- br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-.ret3: br.cond.sptk .work_pending_syscall_end
-
-strace_error:
- ld8 r3=[r2] // load pt_regs.r8
- sub r9=0,r8 // negate return value to get errno value
- ;;
- cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0?
- adds r3=16,r2 // r3=&pt_regs.r10
- ;;
-(p6) mov r10=-1
-(p6) mov r8=r9
- br.cond.sptk .strace_save_retval
-END(ia64_trace_syscall)
-
- /*
- * When traced and returning from sigreturn, we invoke syscall_trace but then
- * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
- */
-GLOBAL_ENTRY(ia64_strace_leave_kernel)
- PT_REGS_UNWIND_INFO(0)
-{ /*
- * Some versions of gas generate bad unwind info if the first instruction of a
- * procedure doesn't go into the first slot of a bundle. This is a workaround.
- */
- nop.m 0
- nop.i 0
- br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-}
-.ret4: br.cond.sptk ia64_leave_kernel
-END(ia64_strace_leave_kernel)
-#endif
-
-GLOBAL_ENTRY(ia64_ret_from_clone)
- PT_REGS_UNWIND_INFO(0)
-{ /*
- * Some versions of gas generate bad unwind info if the first instruction of a
- * procedure doesn't go into the first slot of a bundle. This is a workaround.
- */
- nop.m 0
- nop.i 0
- /*
- * We need to call schedule_tail() to complete the scheduling process.
- * Called by ia64_switch_to() after do_fork()->copy_thread(). r8 contains the
- * address of the previously executing task.
- */
- br.call.sptk.many rp=ia64_invoke_schedule_tail
-}
-#ifdef XEN
- // new domains are cloned but not exec'ed so switch to user mode here
- cmp.ne pKStk,pUStk=r0,r0
- adds r16 = IA64_VCPU_FLAGS_OFFSET, r13
- ;;
- ld8 r16 = [r16] // arch.arch_vmx.flags
- ;;
- cmp.eq p6,p0 = r16, r0
-(p6) br.cond.spnt ia64_leave_kernel // !VMX_DOMAIN
- ;;
- adds r16 = PT(CR_IFS)+16, r12
- ;;
- ld8 r16 = [r16]
- cmp.eq pNonSys,pSys=r0,r0 // pSys=0,pNonSys=1
- ;;
- cmp.eq p6,p7 = 0x6, r16
-(p7) br.cond.sptk ia64_leave_hypervisor // VMX_DOMAIN
- ;;
- /*
- * cr.ifs.v==0 && cr.ifm(ar.pfm)==6 means that HYPERVISOR_suspend
- * has been called. (i.e. HVM with PV driver is restored here)
- * We need to allocate a dummy RSE stack frame to resume.
- */
- alloc r32=ar.pfs, 0, 0, 6, 0
- cmp.eq pSys,pNonSys=r0,r0 // pSys=1,pNonSys=0
- ;;
- bsw.0
- ;;
- mov r21=r13 // set current
- ;;
- bsw.1
- ;;
- mov r8=r0
- br.cond.sptk.many ia64_leave_hypercall
-#else
-.ret8:
- adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
- ld4 r2=[r2]
- ;;
- mov r8=0
- and r2=_TIF_SYSCALL_TRACEAUDIT,r2
- ;;
- cmp.ne p6,p0=r2,r0
-(p6) br.cond.spnt .strace_check_retval
-#endif
- ;; // added stop bits to prevent r8 dependency
-END(ia64_ret_from_clone)
- // fall through
-GLOBAL_ENTRY(ia64_ret_from_syscall)
- PT_REGS_UNWIND_INFO(0)
- cmp.ge p6,p7=r8,r0 // syscall executed successfully?
- adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
- mov r10=r0 // clear error indication in r10
-#ifndef XEN
-(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
-#endif
-END(ia64_ret_from_syscall)
- // fall through
-/*
- * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
- * need to switch to bank 0 and doesn't restore the scratch registers.
- * To avoid leaking kernel bits, the scratch registers are set to
- * the following known-to-be-safe values:
- *
- * r1: restored (global pointer)
- * r2: cleared
- * r3: 1 (when returning to user-level)
- * r8-r11: restored (syscall return value(s))
- * r12: restored (user-level stack pointer)
- * r13: restored (user-level thread pointer)
- * r14: set to __kernel_syscall_via_epc
- * r15: restored (syscall #)
- * r16-r17: cleared
- * r18: user-level b6
- * r19: cleared
- * r20: user-level ar.fpsr
- * r21: user-level b0
- * r22: cleared
- * r23: user-level ar.bspstore
- * r24: user-level ar.rnat
- * r25: user-level ar.unat
- * r26: user-level ar.pfs
- * r27: user-level ar.rsc
- * r28: user-level ip
- * r29: user-level psr
- * r30: user-level cfm
- * r31: user-level pr
- * f6-f11: cleared
- * pr: restored (user-level pr)
- * b0: restored (user-level rp)
- * b6: restored
- * b7: set to __kernel_syscall_via_epc
- * ar.unat: restored (user-level ar.unat)
- * ar.pfs: restored (user-level ar.pfs)
- * ar.rsc: restored (user-level ar.rsc)
- * ar.rnat: restored (user-level ar.rnat)
- * ar.bspstore: restored (user-level ar.bspstore)
- * ar.fpsr: restored (user-level ar.fpsr)
- * ar.ccv: cleared
- * ar.csd: cleared
- * ar.ssd: cleared
- */
-ENTRY(ia64_leave_syscall)
- PT_REGS_UNWIND_INFO(0)
- /*
- * work.need_resched etc. mustn't get changed by this CPU before it returns to
- * user- or fsys-mode, hence we disable interrupts early on.
- *
- * p6 controls whether current_thread_info()->flags needs to be checked for
- * extra work. We always check for extra work when returning to user-level.
- * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
- * is 0. After extra work processing has been completed, execution
- * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
- * needs to be redone.
- */
-#ifdef CONFIG_PREEMPT
- rsm psr.i // disable interrupts
- cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
- ;;
- .pred.rel.mutex pUStk,pKStk
-(pKStk) ld4 r21=[r20] // r21 <- preempt_count
-(pUStk) mov r21=0 // r21 <- 0
- ;;
- cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
-#else /* !CONFIG_PREEMPT */
-(pUStk) rsm psr.i
- cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
-(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
-#endif
-.work_processed_syscall:
- adds r2=PT(LOADRS)+16,r12
- adds r3=PT(AR_BSPSTORE)+16,r12
-#ifdef XEN
- ;;
-#else
- adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
-(p6) ld4 r31=[r18] // load current_thread_info()->flags
-#endif
- ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
- nop.i 0
- ;;
-#ifndef XEN
- mov r16=ar.bsp // M2 get existing backing store pointer
-#endif
- ld8 r18=[r2],PT(R9)-PT(B6) // load b6
-#ifndef XEN
-(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
-#endif
- ;;
- ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
-#ifndef XEN
-(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
-(p6) br.cond.spnt .work_pending_syscall
-#endif
- ;;
- // start restoring the state saved on the kernel stack (struct pt_regs):
- ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
- ld8 r11=[r3],PT(CR_IIP)-PT(R11)
-(pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE!
- ;;
- invala // M0|1 invalidate ALAT
- rsm psr.i | psr.ic // M2 turn off interrupts and interruption collection
-#ifndef XEN
- cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs
-#endif
-
- ld8 r29=[r2],16 // M0|1 load cr.ipsr
- ld8 r28=[r3],16 // M0|1 load cr.iip
- mov r22=r0 // A clear r22
- ;;
- ld8 r30=[r2],16 // M0|1 load cr.ifs
- ld8 r25=[r3],16 // M0|1 load ar.unat
-(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
- ;;
- ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
-(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
- nop 0
- ;;
- ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
- ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
- mov f6=f0 // F clear f6
- ;;
- ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage)
- ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates
- mov f7=f0 // F clear f7
- ;;
- ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
- ld8.fill r1=[r3],16 // M0|1 load r1
-(pUStk) mov r17=1 // A
- ;;
-(pUStk) st1 [r14]=r17 // M2|3
- ld8.fill r13=[r3],16 // M0|1
- mov f8=f0 // F clear f8
- ;;
- ld8.fill r12=[r2] // M0|1 restore r12 (sp)
-#ifdef XEN
- ld8.fill r2=[r3] // M0|1
-#else
- ld8.fill r15=[r3] // M0|1 restore r15
-#endif
- mov b6=r18 // I0 restore b6
-
-#ifdef XEN
- movl r17=THIS_CPU(ia64_phys_stacked_size_p8) // A
-#else
- addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
-#endif
- mov f9=f0 // F clear f9
-(pKStk) br.cond.dpnt.many skip_rbs_switch // B
-
- srlz.d // M0 ensure interruption collection is off (for cover)
- shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
-#ifndef XEN
- cover // B add current frame into dirty partition & set cr.ifs
-#endif
- ;;
-(pUStk) ld4 r17=[r17] // M0|1 r17 = cpu_data->phys_stacked_size_p8
- mov r19=ar.bsp // M2 get new backing store pointer
- mov f10=f0 // F clear f10
-
- nop.m 0
-#ifdef XEN
- mov r14=r0
-#else
- movl r14=__kernel_syscall_via_epc // X
-#endif
- ;;
- mov.m ar.csd=r0 // M2 clear ar.csd
- mov.m ar.ccv=r0 // M2 clear ar.ccv
- mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc)
-
- mov.m ar.ssd=r0 // M2 clear ar.ssd
- mov f11=f0 // F clear f11
- br.cond.sptk.many rbs_switch // B
-END(ia64_leave_syscall)
-
-#ifdef CONFIG_IA32_SUPPORT
-GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
- PT_REGS_UNWIND_INFO(0)
- adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
- adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
- ;;
- .mem.offset 0,0
- st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
- .mem.offset 8,0
- st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
-END(ia64_ret_from_ia32_execve)
- // fall through
-#endif /* CONFIG_IA32_SUPPORT */
-GLOBAL_ENTRY(ia64_leave_kernel)
- PT_REGS_UNWIND_INFO(0)
- /*
- * work.need_resched etc. mustn't get changed by this CPU before it returns to
- * user- or fsys-mode, hence we disable interrupts early on.
- *
- * p6 controls whether current_thread_info()->flags needs to be checked for
- * extra work. We always check for extra work when returning to user-level.
- * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
- * is 0. After extra work processing has been completed, execution
- * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check
- * needs to be redone.
- */
-#ifdef CONFIG_PREEMPT
- rsm psr.i // disable interrupts
- cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
- ;;
- .pred.rel.mutex pUStk,pKStk
-(pKStk) ld4 r21=[r20] // r21 <- preempt_count
-(pUStk) mov r21=0 // r21 <- 0
- ;;
- cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
-#else
-(pUStk) rsm psr.i
- cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
-(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
-#endif
-.work_processed_kernel:
-#ifdef XEN
- ;;
-(pUStk) ssm psr.i
-(pUStk) br.call.sptk.many b0=do_softirq
-(pUStk) rsm psr.i
- ;;
-(pUStk) br.call.sptk.many b0=reflect_event
- ;;
- adds r7 = PT(EML_UNAT)+16,r12
- ;;
- ld8 r7 = [r7]
- ;;
- mov ar.unat=r7 /* load eml_unat */
- mov r31=r0
-
-#else
- adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
-(p6) ld4 r31=[r17] // load current_thread_info()->flags
-#endif
- adds r21=PT(PR)+16,r12
- ;;
-
- lfetch [r21],PT(CR_IPSR)-PT(PR)
- adds r2=PT(B6)+16,r12
- adds r3=PT(R16)+16,r12
- ;;
- lfetch [r21]
- ld8 r28=[r2],8 // load b6
- adds r29=PT(R24)+16,r12
-
-#ifdef XEN
- ld8.fill r16=[r3]
- adds r3=PT(AR_CSD)-PT(R16),r3
-#else
- ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
-#endif
- adds r30=PT(AR_CCV)+16,r12
-(p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
- ;;
- ld8.fill r24=[r29]
- ld8 r15=[r30] // load ar.ccv
-(p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending?
- ;;
- ld8 r29=[r2],16 // load b7
- ld8 r30=[r3],16 // load ar.csd
-#ifndef XEN
-(p6) br.cond.spnt .work_pending
-#endif
- ;;
- ld8 r31=[r2],16 // load ar.ssd
- ld8.fill r8=[r3],16
- ;;
- ld8.fill r9=[r2],16
- ld8.fill r10=[r3],PT(R17)-PT(R10)
- ;;
- ld8.fill r11=[r2],PT(R18)-PT(R11)
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- mov ar.csd=r30
- mov ar.ssd=r31
- ;;
- rsm psr.i | psr.ic // initiate turning off of interrupts and interruption collection
- invala // invalidate ALAT
- ;;
- ld8.fill r22=[r2],24
- ld8.fill r23=[r3],24
- mov b6=r28
- ;;
- ld8.fill r25=[r2],16
- ld8.fill r26=[r3],16
- mov b7=r29
- ;;
- ld8.fill r27=[r2],16
- ld8.fill r28=[r3],16
- ;;
- ld8.fill r29=[r2],16
- ld8.fill r30=[r3],24
- ;;
- ld8.fill r31=[r2],PT(F9)-PT(R31)
- adds r3=PT(F10)-PT(F6),r3
- ;;
- ldf.fill f9=[r2],PT(F6)-PT(F9)
- ldf.fill f10=[r3],PT(F8)-PT(F10)
- ;;
- ldf.fill f6=[r2],PT(F7)-PT(F6)
- ;;
- ldf.fill f7=[r2],PT(F11)-PT(F7)
-#ifdef XEN
- ldf.fill f8=[r3],PT(R5)-PT(F8)
- ;;
- ldf.fill f11=[r2],PT(R4)-PT(F11)
- mov ar.ccv=r15
- ;;
- ld8.fill r4=[r2],16
- ld8.fill r5=[r3],16
- ;;
- ld8.fill r6=[r2]
- ld8.fill r7=[r3]
- ;;
- srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
- ;;
- bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
- ;;
-#else
- ldf.fill f8=[r3],32
- ;;
- srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
- mov ar.ccv=r15
- ;;
- ldf.fill f11=[r2]
- bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
- ;;
-#endif
-#ifdef XEN
-(pUStk) movl r18=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
-(pUStk) ld8 r18=[r18]
-#else
-(pUStk) mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
-#endif
- adds r16=PT(CR_IPSR)+16,r12
- adds r17=PT(CR_IIP)+16,r12
-
-(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
- nop.i 0
- nop.i 0
- ;;
- ld8 r29=[r16],16 // load cr.ipsr
- ld8 r28=[r17],16 // load cr.iip
- ;;
- ld8 r30=[r16],16 // load cr.ifs
- ld8 r25=[r17],16 // load ar.unat
- ;;
- ld8 r26=[r16],16 // load ar.pfs
- ld8 r27=[r17],16 // load ar.rsc
-#ifndef XEN
- cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
-#endif
- ;;
- ld8 r24=[r16],16 // load ar.rnat (may be garbage)
- ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
- ;;
- ld8 r31=[r16],16 // load predicates
- ld8 r21=[r17],16 // load b0
- ;;
- ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
- ld8.fill r1=[r17],16 // load r1
- ;;
- ld8.fill r12=[r16],16
- ld8.fill r13=[r17],16
-(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
- ;;
- ld8 r20=[r16],16 // ar.fpsr
- ld8.fill r15=[r17],16
- ;;
- ld8.fill r14=[r16],16
- ld8.fill r2=[r17]
-(pUStk) mov r17=1
- ;;
- ld8.fill r3=[r16]
-(pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
- shr.u r18=r19,16 // get byte size of existing "dirty" partition
- ;;
- mov r16=ar.bsp // get existing backing store pointer
-#ifdef XEN
- movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
-#else
- addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
-#endif
- ;;
- ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
-(pKStk) br.cond.dpnt skip_rbs_switch
-
- /*
- * Restore user backing store.
- *
- * NOTE: alloc, loadrs, and cover can't be predicated.
- */
-(pNonSys) br.cond.dpnt dont_preserve_current_frame
- cover // add current frame into dirty partition and set cr.ifs
- ;;
- mov r19=ar.bsp // get new backing store pointer
-rbs_switch:
- sub r16=r16,r18 // krbs = old bsp - size of dirty partition
-#ifndef XEN
- cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
-#endif
- ;;
- sub r19=r19,r16 // calculate total byte size of dirty partition
- add r18=64,r18 // don't force in0-in7 into memory...
- ;;
- shl r19=r19,16 // shift size of dirty partition into loadrs position
- ;;
-dont_preserve_current_frame:
- /*
- * To prevent leaking bits between the kernel and user-space,
- * we must clear the stacked registers in the "invalid" partition here.
- * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
- * 5 registers/cycle on McKinley).
- */
-# define pRecurse p6
-# define pReturn p7
-#ifdef CONFIG_ITANIUM
-# define Nregs 10
-#else
-# define Nregs 14
-#endif
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
- sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize
- ;;
- mov ar.rsc=r19 // load ar.rsc to be used for "loadrs"
- shladd in0=loc1,3,r17
- mov in1=0
- ;;
- TEXT_ALIGN(32)
-rse_clear_invalid:
-#ifdef CONFIG_ITANIUM
- // cycle 0
- { .mii
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
- add out0=-Nregs*8,in0
-}{ .mfb
- add out1=1,in1 // increment recursion count
- nop.f 0
- nop.b 0 // can't do br.call here because of alloc (WAW on CFM)
- ;;
-}{ .mfi // cycle 1
- mov loc1=0
- nop.f 0
- mov loc2=0
-}{ .mib
- mov loc3=0
- mov loc4=0
-(pRecurse) br.call.sptk.many b0=rse_clear_invalid
-
-}{ .mfi // cycle 2
- mov loc5=0
- nop.f 0
- cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
-}{ .mib
- mov loc6=0
- mov loc7=0
-(pReturn) br.ret.sptk.many b0
-}
-#else /* !CONFIG_ITANIUM */
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
- add out0=-Nregs*8,in0
- add out1=1,in1 // increment recursion count
- mov loc1=0
- mov loc2=0
- ;;
- mov loc3=0
- mov loc4=0
- mov loc5=0
- mov loc6=0
- mov loc7=0
-(pRecurse) br.call.dptk.few b0=rse_clear_invalid
- ;;
- mov loc8=0
- mov loc9=0
- cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
- mov loc10=0
- mov loc11=0
-(pReturn) br.ret.dptk.many b0
-#endif /* !CONFIG_ITANIUM */
-# undef pRecurse
-# undef pReturn
- ;;
- alloc r17=ar.pfs,0,0,0,0 // drop current register frame
- ;;
- loadrs
- ;;
-skip_rbs_switch:
- mov ar.unat=r25 // M2
-(pKStk) extr.u r22=r22,21,1 // I0 extract current value of psr.pp from r22
-(pLvSys)mov r19=r0 // A clear r19 for leave_syscall, no-op otherwise
- ;;
-(pUStk) mov ar.bspstore=r23 // M2
-(pKStk) dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp
-(pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise
- ;;
- mov cr.ipsr=r29 // M2
- mov ar.pfs=r26 // I0
-(pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise
-#ifdef XEN
- mov cr.ifs=r30 // M2
-#else
-(p9) mov cr.ifs=r30 // M2
-#endif
- mov b0=r21 // I0
-(pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise
-
- mov ar.fpsr=r20 // M2
- mov cr.iip=r28 // M2
- nop 0
- ;;
-(pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode
- nop 0
-#ifdef XEN
-(pLvSys)mov r15=r0
-#else
-(pLvSys)mov r2=r0
-#endif
-
- mov ar.rsc=r27 // M2
- mov pr=r31,-1 // I0
- rfi // B
-
-#ifndef XEN
- /*
- * On entry:
- * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
- * r31 = current->thread_info->flags
- * On exit:
- * p6 = TRUE if work-pending-check needs to be redone
- */
-.work_pending_syscall:
- add r2=-8,r2
- add r3=-8,r3
- ;;
- st8 [r2]=r8
- st8 [r3]=r10
-.work_pending:
- tbit.nz p6,p0=r31,TIF_SIGDELAYED // signal delayed from MCA/INIT/NMI/PMI context?
-(p6) br.cond.sptk.few .sigdelayed
- ;;
- tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
-(p6) br.cond.sptk.few .notify
-#ifdef CONFIG_PREEMPT
-(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
- ;;
-(pKStk) st4 [r20]=r21
- ssm psr.i // enable interrupts
-#endif
- br.call.spnt.many rp=schedule
-.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
- rsm psr.i // disable interrupts
- ;;
-#ifdef CONFIG_PREEMPT
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
- ;;
-(pKStk) st4 [r20]=r0 // preempt_count() <- 0
-#endif
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel // re-check
-
-.notify:
-(pUStk) br.call.spnt.many rp=notify_resume_user
-.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel // don't re-check
-
-// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
-// it could not be delivered. Deliver it now. The signal might be for us and
-// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
-// signal.
-
-.sigdelayed:
- br.call.sptk.many rp=do_sigdelayed
- cmp.eq p6,p0=r0,r0 // p6 <- 1, always re-check
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel // re-check
-
-.work_pending_syscall_end:
- adds r2=PT(R8)+16,r12
- adds r3=PT(R10)+16,r12
- ;;
- ld8 r8=[r2]
- ld8 r10=[r3]
- br.cond.sptk.many .work_processed_syscall // re-check
-#endif
-
-END(ia64_leave_kernel)
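The sizing math feeding rse_clear_invalid is easier to follow in C. A hedged sketch, using named variables in place of r17/r18/r19 and loc1 (the function name is illustrative):

static unsigned long rse_invalid_bytes(unsigned long loadrs_val,
				       unsigned long phys_stacked_size_p8)
{
	/* dirty = bytes of stacked registers "loadrs" will reload;
	 * the rest of the physical stack must be zeroed so no kernel
	 * bits leak into user-visible stacked registers. */
	unsigned long dirty = (loadrs_val >> 16) + 64; /* keep in0-in7 */
	unsigned long rnats = dirty >> 9;    /* <= dirty / (64*8)      */

	/* sub r17=r17,r18 ; shladd in0=loc1,3,r17 */
	return (phys_stacked_size_p8 - dirty) + (rnats << 3);
}

rse_clear_invalid then zeroes that many bytes' worth of stacked registers, Nregs at a time, recursing until the count is exhausted.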
-
-ENTRY(handle_syscall_error)
- /*
- * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
- * lead us to mistake a negative return value for a failed syscall. Those syscalls
- * must deposit a non-zero value in pt_regs.r8 to indicate an error. If
- * pt_regs.r8 is zero, we assume that the call completed successfully.
- */
- PT_REGS_UNWIND_INFO(0)
- ld8 r3=[r2] // load pt_regs.r8
- ;;
- cmp.eq p6,p7=r3,r0 // is pt_regs.r8==0?
- ;;
-(p7) mov r10=-1
-(p7) sub r8=0,r8 // negate return value to get errno
- br.cond.sptk ia64_leave_syscall
-END(handle_syscall_error)
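The (r8, r10) pair that handle_syscall_error maintains is the ia64 syscall return convention: userland sees two registers rather than a single value. A sketch of the effective result (names are illustrative):

/* ret is the raw kernel return value; pt_regs.r8 != 0 marks calls
 * whose negative results really are errors (see comment above). */
static void set_syscall_retval(long ret, unsigned long pt_regs_r8,
			       unsigned long *r8, unsigned long *r10)
{
	if (ret < 0 && pt_regs_r8 != 0) {
		*r8  = (unsigned long)-ret;	/* positive errno   */
		*r10 = (unsigned long)-1;	/* error indication */
	} else {
		*r8  = (unsigned long)ret;	/* plain value      */
		*r10 = 0;			/* success          */
	}
}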
-
- /*
- * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
- * in case a system call gets restarted.
- */
-GLOBAL_ENTRY(ia64_invoke_schedule_tail)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,1,0
- mov loc0=rp
- mov out0=r8 // Address of previous task
- ;;
- br.call.sptk.many rp=schedule_tail
-.ret11: mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(ia64_invoke_schedule_tail)
-
-#ifndef XEN
- /*
- * Setup stack and call do_notify_resume_user(). Note that pSys and pNonSys need to
- * be set up by the caller. We declare 8 input registers so the system call
- * args get preserved, in case we need to restart a system call.
- */
-ENTRY(notify_resume_user)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
- mov r9=ar.unat
- mov loc0=rp // save return address
- mov out0=0 // there is no "oldset"
- adds out1=8,sp // out1=&sigscratch->ar_pfs
-(pSys) mov out2=1 // out2==1 => we're in a syscall
- ;;
-(pNonSys) mov out2=0 // out2==0 => not a syscall
- .fframe 16
- .spillsp ar.unat, 16
- st8 [sp]=r9,-16 // allocate space for ar.unat and save it
- st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch
- .body
- br.call.sptk.many rp=do_notify_resume_user
-.ret15: .restore sp
- adds sp=16,sp // pop scratch stack space
- ;;
- ld8 r9=[sp] // load new unat from sigscratch->scratch_unat
- mov rp=loc0
- ;;
- mov ar.unat=r9
- mov ar.pfs=loc1
- br.ret.sptk.many rp
-END(notify_resume_user)
-
-GLOBAL_ENTRY(sys_rt_sigsuspend)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
- mov r9=ar.unat
- mov loc0=rp // save return address
- mov out0=in0 // mask
- mov out1=in1 // sigsetsize
- adds out2=8,sp // out2=&sigscratch->ar_pfs
- ;;
- .fframe 16
- .spillsp ar.unat, 16
- st8 [sp]=r9,-16 // allocate space for ar.unat and save it
- st8 [out2]=loc1,-8 // save ar.pfs, out2=&sigscratch
- .body
- br.call.sptk.many rp=ia64_rt_sigsuspend
-.ret17: .restore sp
- adds sp=16,sp // pop scratch stack space
- ;;
- ld8 r9=[sp] // load new unat from sw->caller_unat
- mov rp=loc0
- ;;
- mov ar.unat=r9
- mov ar.pfs=loc1
- br.ret.sptk.many rp
-END(sys_rt_sigsuspend)
-
-ENTRY(sys_rt_sigreturn)
- PT_REGS_UNWIND_INFO(0)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- alloc r2=ar.pfs,8,0,1,0
- .prologue
- PT_REGS_SAVES(16)
- adds sp=-16,sp
- .body
- cmp.eq pNonSys,pSys=r0,r0 // sigreturn isn't a normal syscall...
- ;;
- /*
- * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
- * syscall-entry path does not save them, we save them here instead. Note: we
- * don't need to save any other registers that are not saved by the streamlined
- * syscall path, because restore_sigcontext() restores them.
- */
- adds r16=PT(F6)+32,sp
- adds r17=PT(F7)+32,sp
- ;;
- stf.spill [r16]=f6,32
- stf.spill [r17]=f7,32
- ;;
- stf.spill [r16]=f8,32
- stf.spill [r17]=f9,32
- ;;
- stf.spill [r16]=f10
- stf.spill [r17]=f11
- adds out0=16,sp // out0 = &sigscratch
- br.call.sptk.many rp=ia64_rt_sigreturn
-.ret19: .restore sp,0
- adds sp=16,sp
- ;;
- ld8 r9=[sp] // load new ar.unat
- mov.sptk b7=r8,ia64_leave_kernel
- ;;
- mov ar.unat=r9
- br.many b7
-END(sys_rt_sigreturn)
-#endif
-
-GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
- .prologue
- /*
- * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
- */
- mov r16=r0
- DO_SAVE_SWITCH_STACK
- br.call.sptk.many rp=ia64_handle_unaligned // stack frame setup in ivt
-.ret21: .body
- DO_LOAD_SWITCH_STACK
- br.cond.sptk.many rp // goes to ia64_leave_kernel
-END(ia64_prepare_handle_unaligned)
-
- //
- // unw_init_running(void (*callback)(info, arg), void *arg)
- //
-# define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)
-
-GLOBAL_ENTRY(unw_init_running)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
- alloc loc1=ar.pfs,2,3,3,0
- ;;
- ld8 loc2=[in0],8
- mov loc0=rp
- mov r16=loc1
- DO_SAVE_SWITCH_STACK
- .body
-
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
- .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
- SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
- adds sp=-EXTRA_FRAME_SIZE,sp
- .body
- ;;
- adds out0=16,sp // &info
- mov out1=r13 // current
- adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack
- br.call.sptk.many rp=unw_init_frame_info
-1: adds out0=16,sp // &info
- mov b6=loc2
- mov loc2=gp // save gp across indirect function call
- ;;
- ld8 gp=[in0]
- mov out1=in1 // arg
- br.call.sptk.many rp=b6 // invoke the callback function
-1: mov gp=loc2 // restore gp
-
- // For now, we don't allow changing registers from within
- // unw_init_running; if we ever want to allow that, we'd
- // have to do a load_switch_stack here:
- .restore sp
- adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp
-
- mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(unw_init_running)
-
-#ifdef XEN
-GLOBAL_ENTRY(ia64_do_multicall_call)
- movl r2=ia64_hypercall_table;;
- shladd r2=r38,3,r2;;
- ld8 r2=[r2];;
- mov b6=r2
- br.sptk.many b6;;
-END(ia64_do_multicall_call)
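In C terms the dispatch above is a plain indexed call through the table that follows. A hedged sketch (the typedef and six-argument shape are illustrative; in the assembly the hypercall number arrives in r38 and the arguments in stacked registers):

typedef unsigned long (*hypercall_fn_t)(unsigned long, unsigned long,
					unsigned long, unsigned long,
					unsigned long, unsigned long);
extern hypercall_fn_t ia64_hypercall_table[];

static unsigned long do_multicall_call(unsigned long nr,
				       unsigned long a[6])
{
	/* shladd r2=r38,3,r2 scales the hypercall number by 8,
	 * i.e. sizeof(void *), before the indirect branch. */
	hypercall_fn_t fn = ia64_hypercall_table[nr];

	return fn(a[0], a[1], a[2], a[3], a[4], a[5]);
}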
-
-
- .rodata
- .align 8
- .globl ia64_hypercall_table
-ia64_hypercall_table:
- data8 do_ni_hypercall /* do_set_trap_table *//* 0 */
- data8 do_ni_hypercall /* do_mmu_update */
- data8 do_ni_hypercall /* do_set_gdt */
- data8 do_ni_hypercall /* do_stack_switch */
- data8 do_ni_hypercall /* do_set_callbacks */
- data8 do_ni_hypercall /* do_fpu_taskswitch *//* 5 */
- data8 do_sched_op_compat
- data8 do_platform_op
- data8 do_ni_hypercall /* do_set_debugreg */
- data8 do_ni_hypercall /* do_get_debugreg */
- data8 do_ni_hypercall /* do_update_descriptor * 10 */
- data8 do_ni_hypercall /* do_ni_hypercall */
- data8 do_memory_op
- data8 do_multicall
- data8 do_ni_hypercall /* do_update_va_mapping */
- data8 do_ni_hypercall /* do_set_timer_op */ /* 15 */
- data8 do_ni_hypercall
- data8 do_xen_version
- data8 do_console_io
- data8 do_ni_hypercall
- data8 do_grant_table_op /* 20 */
- data8 do_ni_hypercall /* do_vm_assist */
- data8 do_ni_hypercall /* do_update_va_mapping_otherdomain */
- data8 do_ni_hypercall /* (x86 only) */
- data8 do_vcpu_op /* do_vcpu_op */
- data8 do_ni_hypercall /* (x86_64 only) */ /* 25 */
- data8 do_ni_hypercall /* do_mmuext_op */
- data8 do_ni_hypercall /* do_acm_op */
- data8 do_ni_hypercall /* do_nmi_op */
- data8 do_sched_op
- data8 do_callback_op /* */ /* 30 */
- data8 do_xenoprof_op /* */
- data8 do_event_channel_op
- data8 do_physdev_op
- data8 do_hvm_op /* */
- data8 do_sysctl /* */ /* 35 */
- data8 do_domctl /* */
- data8 do_kexec_op /* */
- data8 do_tmem_op /* */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */ /* 40 */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */ /* 45 */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */
- data8 do_dom0vp_op /* dom0vp_op */
- data8 do_pirq_guest_eoi /* arch_1 */
- data8 do_ia64_debug_op /* arch_2 */ /* 50 */
- data8 do_ni_hypercall /* arch_3 */
- data8 do_ni_hypercall /* arch_4 */
- data8 do_ni_hypercall /* arch_5 */
- data8 do_ni_hypercall /* arch_6 */
- data8 do_ni_hypercall /* arch_7 */ /* 55 */
- data8 do_ni_hypercall
- data8 do_ni_hypercall
- data8 do_ni_hypercall
- data8 do_ni_hypercall
- data8 do_ni_hypercall /* 60 */
- data8 do_ni_hypercall
- data8 do_ni_hypercall
- data8 do_ni_hypercall
-
- // guard against failures to increase NR_hypercalls
- .org ia64_hypercall_table + 8*NR_hypercalls
-
-#else
- .rodata
- .align 8
- .globl sys_call_table
-sys_call_table:
- data8 sys_ni_syscall // This must be sys_ni_syscall! See ivt.S.
- data8 sys_exit // 1025
- data8 sys_read
- data8 sys_write
- data8 sys_open
- data8 sys_close
- data8 sys_creat // 1030
- data8 sys_link
- data8 sys_unlink
- data8 ia64_execve
- data8 sys_chdir
- data8 sys_fchdir // 1035
- data8 sys_utimes
- data8 sys_mknod
- data8 sys_chmod
- data8 sys_chown
- data8 sys_lseek // 1040
- data8 sys_getpid
- data8 sys_getppid
- data8 sys_mount
- data8 sys_umount
- data8 sys_setuid // 1045
- data8 sys_getuid
- data8 sys_geteuid
- data8 sys_ptrace
- data8 sys_access
- data8 sys_sync // 1050
- data8 sys_fsync
- data8 sys_fdatasync
- data8 sys_kill
- data8 sys_rename
- data8 sys_mkdir // 1055
- data8 sys_rmdir
- data8 sys_dup
- data8 sys_pipe
- data8 sys_times
- data8 ia64_brk // 1060
- data8 sys_setgid
- data8 sys_getgid
- data8 sys_getegid
- data8 sys_acct
- data8 sys_ioctl // 1065
- data8 sys_fcntl
- data8 sys_umask
- data8 sys_chroot
- data8 sys_ustat
- data8 sys_dup2 // 1070
- data8 sys_setreuid
- data8 sys_setregid
- data8 sys_getresuid
- data8 sys_setresuid
- data8 sys_getresgid // 1075
- data8 sys_setresgid
- data8 sys_getgroups
- data8 sys_setgroups
- data8 sys_getpgid
- data8 sys_setpgid // 1080
- data8 sys_setsid
- data8 sys_getsid
- data8 sys_sethostname
- data8 sys_setrlimit
- data8 sys_getrlimit // 1085
- data8 sys_getrusage
- data8 sys_gettimeofday
- data8 sys_settimeofday
- data8 sys_select
- data8 sys_poll // 1090
- data8 sys_symlink
- data8 sys_readlink
- data8 sys_uselib
- data8 sys_swapon
- data8 sys_swapoff // 1095
- data8 sys_reboot
- data8 sys_truncate
- data8 sys_ftruncate
- data8 sys_fchmod
- data8 sys_fchown // 1100
- data8 ia64_getpriority
- data8 sys_setpriority
- data8 sys_statfs
- data8 sys_fstatfs
- data8 sys_gettid // 1105
- data8 sys_semget
- data8 sys_semop
- data8 sys_semctl
- data8 sys_msgget
- data8 sys_msgsnd // 1110
- data8 sys_msgrcv
- data8 sys_msgctl
- data8 sys_shmget
- data8 sys_shmat
- data8 sys_shmdt // 1115
- data8 sys_shmctl
- data8 sys_syslog
- data8 sys_setitimer
- data8 sys_getitimer
- data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */
- data8 sys_ni_syscall /* was: ia64_oldlstat */
- data8 sys_ni_syscall /* was: ia64_oldfstat */
- data8 sys_vhangup
- data8 sys_lchown
- data8 sys_remap_file_pages // 1125
- data8 sys_wait4
- data8 sys_sysinfo
- data8 sys_clone
- data8 sys_setdomainname
- data8 sys_newuname // 1130
- data8 sys_adjtimex
- data8 sys_ni_syscall /* was: ia64_create_module */
- data8 sys_init_module
- data8 sys_delete_module
- data8 sys_ni_syscall // 1135 /* was: sys_get_kernel_syms */
- data8 sys_ni_syscall /* was: sys_query_module */
- data8 sys_quotactl
- data8 sys_bdflush
- data8 sys_sysfs
- data8 sys_personality // 1140
- data8 sys_ni_syscall // sys_afs_syscall
- data8 sys_setfsuid
- data8 sys_setfsgid
- data8 sys_getdents
- data8 sys_flock // 1145
- data8 sys_readv
- data8 sys_writev
- data8 sys_pread64
- data8 sys_pwrite64
- data8 sys_sysctl // 1150
- data8 sys_mmap
- data8 sys_munmap
- data8 sys_mlock
- data8 sys_mlockall
- data8 sys_mprotect // 1155
- data8 ia64_mremap
- data8 sys_msync
- data8 sys_munlock
- data8 sys_munlockall
- data8 sys_sched_getparam // 1160
- data8 sys_sched_setparam
- data8 sys_sched_getscheduler
- data8 sys_sched_setscheduler
- data8 sys_sched_yield
- data8 sys_sched_get_priority_max // 1165
- data8 sys_sched_get_priority_min
- data8 sys_sched_rr_get_interval
- data8 sys_nanosleep
- data8 sys_nfsservctl
- data8 sys_prctl // 1170
- data8 sys_getpagesize
- data8 sys_mmap2
- data8 sys_pciconfig_read
- data8 sys_pciconfig_write
- data8 sys_perfmonctl // 1175
- data8 sys_sigaltstack
- data8 sys_rt_sigaction
- data8 sys_rt_sigpending
- data8 sys_rt_sigprocmask
- data8 sys_rt_sigqueueinfo // 1180
- data8 sys_rt_sigreturn
- data8 sys_rt_sigsuspend
- data8 sys_rt_sigtimedwait
- data8 sys_getcwd
- data8 sys_capget // 1185
- data8 sys_capset
- data8 sys_sendfile64
- data8 sys_ni_syscall // sys_getpmsg (STREAMS)
- data8 sys_ni_syscall // sys_putpmsg (STREAMS)
- data8 sys_socket // 1190
- data8 sys_bind
- data8 sys_connect
- data8 sys_listen
- data8 sys_accept
- data8 sys_getsockname // 1195
- data8 sys_getpeername
- data8 sys_socketpair
- data8 sys_send
- data8 sys_sendto
- data8 sys_recv // 1200
- data8 sys_recvfrom
- data8 sys_shutdown
- data8 sys_setsockopt
- data8 sys_getsockopt
- data8 sys_sendmsg // 1205
- data8 sys_recvmsg
- data8 sys_pivot_root
- data8 sys_mincore
- data8 sys_madvise
- data8 sys_newstat // 1210
- data8 sys_newlstat
- data8 sys_newfstat
- data8 sys_clone2
- data8 sys_getdents64
- data8 sys_getunwind // 1215
- data8 sys_readahead
- data8 sys_setxattr
- data8 sys_lsetxattr
- data8 sys_fsetxattr
- data8 sys_getxattr // 1220
- data8 sys_lgetxattr
- data8 sys_fgetxattr
- data8 sys_listxattr
- data8 sys_llistxattr
- data8 sys_flistxattr // 1225
- data8 sys_removexattr
- data8 sys_lremovexattr
- data8 sys_fremovexattr
- data8 sys_tkill
- data8 sys_futex // 1230
- data8 sys_sched_setaffinity
- data8 sys_sched_getaffinity
- data8 sys_set_tid_address
- data8 sys_fadvise64_64
- data8 sys_tgkill // 1235
- data8 sys_exit_group
- data8 sys_lookup_dcookie
- data8 sys_io_setup
- data8 sys_io_destroy
- data8 sys_io_getevents // 1240
- data8 sys_io_submit
- data8 sys_io_cancel
- data8 sys_epoll_create
- data8 sys_epoll_ctl
- data8 sys_epoll_wait // 1245
- data8 sys_restart_syscall
- data8 sys_semtimedop
- data8 sys_timer_create
- data8 sys_timer_settime
- data8 sys_timer_gettime // 1250
- data8 sys_timer_getoverrun
- data8 sys_timer_delete
- data8 sys_clock_settime
- data8 sys_clock_gettime
- data8 sys_clock_getres // 1255
- data8 sys_clock_nanosleep
- data8 sys_fstatfs64
- data8 sys_statfs64
- data8 sys_mbind
- data8 sys_get_mempolicy // 1260
- data8 sys_set_mempolicy
- data8 sys_mq_open
- data8 sys_mq_unlink
- data8 sys_mq_timedsend
- data8 sys_mq_timedreceive // 1265
- data8 sys_mq_notify
- data8 sys_mq_getsetattr
- data8 sys_ni_syscall // reserved for kexec_load
- data8 sys_ni_syscall // reserved for vserver
- data8 sys_waitid // 1270
- data8 sys_add_key
- data8 sys_request_key
- data8 sys_keyctl
- data8 sys_ioprio_set
- data8 sys_ioprio_get // 1275
- data8 sys_ni_syscall
- data8 sys_inotify_init
- data8 sys_inotify_add_watch
- data8 sys_inotify_rm_watch
-
- .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
-#endif
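
The `// 1025`-style comments above reflect ia64's syscall numbering, which
starts at 1024: entry 0 of sys_call_table is syscall 1024 and sys_exit is
1025, so syscall N is dispatched through sys_call_table[N - 1024], with the
.org directive asserting the table spans exactly NR_syscalls entries. A
hedged sketch of that index math over a toy four-entry table (all names
illustrative):

    #include <stdio.h>

    #define SYSCALL_BASE 1024u

    typedef long (*syscall_fn_t)(void);

    static long toy_ni(void)   { return -38; }  /* illustrative -ENOSYS */
    static long toy_exit(void) { return 0; }

    /* entry 0 <-> syscall 1024, entry 1 <-> syscall 1025 (sys_exit), ... */
    static syscall_fn_t toy_table[4] = { toy_ni, toy_exit, toy_ni, toy_ni };

    static long toy_dispatch(unsigned nr)
    {
        unsigned idx = nr - SYSCALL_BASE;  /* unsigned wrap also rejects nr < 1024 */
        return idx < 4u ? toy_table[idx]() : -38;
    }

    int main(void) { printf("%ld\n", toy_dispatch(1025)); return 0; }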
diff --git a/xen/arch/ia64/linux-xen/entry.h b/xen/arch/ia64/linux-xen/entry.h
deleted file mode 100644
index 6dbe978f73..0000000000
--- a/xen/arch/ia64/linux-xen/entry.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#include <linux/config.h>
-
-/*
- * Preserved registers that are shared between code in ivt.S and
- * entry.S. Be careful not to step on these!
- */
-#define PRED_LEAVE_SYSCALL 1 /* TRUE iff leave from syscall */
-#define PRED_KERNEL_STACK 2 /* returning to kernel-stacks? */
-#define PRED_USER_STACK 3 /* returning to user-stacks? */
-#define PRED_SYSCALL 4 /* inside a system call? */
-#define PRED_NON_SYSCALL 5 /* complement of PRED_SYSCALL */
-
-#ifdef __ASSEMBLY__
-# define PASTE2(x,y) x##y
-# define PASTE(x,y) PASTE2(x,y)
-
-# define pLvSys PASTE(p,PRED_LEAVE_SYSCALL)
-# define pKStk PASTE(p,PRED_KERNEL_STACK)
-# define pUStk PASTE(p,PRED_USER_STACK)
-# define pSys PASTE(p,PRED_SYSCALL)
-# define pNonSys PASTE(p,PRED_NON_SYSCALL)
-#endif
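
PASTE uses the classic two-level pasting idiom: the extra PASTE2 level lets
the PRED_* argument expand to its numeric value before ## glues it to "p",
so pLvSys names the assembler predicate register p1. The same mechanism in
plain C, with a hypothetical predicate number and an int standing in for
the register:

    #include <stdio.h>

    #define PASTE2(x,y) x##y
    #define PASTE(x,y)  PASTE2(x,y)    /* extra level forces expansion of y */

    #define PRED_DEMO 1
    #define pDemo PASTE(p, PRED_DEMO)  /* expands to the single token p1 */

    int main(void)
    {
        int p1 = 42;                   /* stand-in for predicate register p1 */
        printf("%d\n", pDemo);         /* prints 42: pDemo is literally p1 */
        return 0;
    }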
-
-#define PT(f) (IA64_PT_REGS_##f##_OFFSET)
-#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET)
-#ifdef XEN
-#define VPD(f) (VPD_##f##_START_OFFSET)
-#endif
-
-#define PT_REGS_SAVES(off) \
- .unwabi 3, 'i'; \
- .fframe IA64_PT_REGS_SIZE+16+(off); \
- .spillsp rp, PT(CR_IIP)+16+(off); \
- .spillsp ar.pfs, PT(CR_IFS)+16+(off); \
- .spillsp ar.unat, PT(AR_UNAT)+16+(off); \
- .spillsp ar.fpsr, PT(AR_FPSR)+16+(off); \
- .spillsp pr, PT(PR)+16+(off);
-
-#define PT_REGS_UNWIND_INFO(off) \
- .prologue; \
- PT_REGS_SAVES(off); \
- .body
-
-#define SWITCH_STACK_SAVES(off) \
- .savesp ar.unat,SW(CALLER_UNAT)+16+(off); \
- .savesp ar.fpsr,SW(AR_FPSR)+16+(off); \
- .spillsp f2,SW(F2)+16+(off); .spillsp f3,SW(F3)+16+(off); \
- .spillsp f4,SW(F4)+16+(off); .spillsp f5,SW(F5)+16+(off); \
- .spillsp f16,SW(F16)+16+(off); .spillsp f17,SW(F17)+16+(off); \
- .spillsp f18,SW(F18)+16+(off); .spillsp f19,SW(F19)+16+(off); \
- .spillsp f20,SW(F20)+16+(off); .spillsp f21,SW(F21)+16+(off); \
- .spillsp f22,SW(F22)+16+(off); .spillsp f23,SW(F23)+16+(off); \
- .spillsp f24,SW(F24)+16+(off); .spillsp f25,SW(F25)+16+(off); \
- .spillsp f26,SW(F26)+16+(off); .spillsp f27,SW(F27)+16+(off); \
- .spillsp f28,SW(F28)+16+(off); .spillsp f29,SW(F29)+16+(off); \
- .spillsp f30,SW(F30)+16+(off); .spillsp f31,SW(F31)+16+(off); \
- .spillsp r4,SW(R4)+16+(off); .spillsp r5,SW(R5)+16+(off); \
- .spillsp r6,SW(R6)+16+(off); .spillsp r7,SW(R7)+16+(off); \
- .spillsp b0,SW(B0)+16+(off); .spillsp b1,SW(B1)+16+(off); \
- .spillsp b2,SW(B2)+16+(off); .spillsp b3,SW(B3)+16+(off); \
- .spillsp b4,SW(B4)+16+(off); .spillsp b5,SW(B5)+16+(off); \
- .spillsp ar.pfs,SW(AR_PFS)+16+(off); .spillsp ar.lc,SW(AR_LC)+16+(off); \
- .spillsp @priunat,SW(AR_UNAT)+16+(off); \
- .spillsp ar.rnat,SW(AR_RNAT)+16+(off); \
- .spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off); \
- .spillsp pr,SW(PR)+16+(off)
-
-#define DO_SAVE_SWITCH_STACK \
- movl r28=1f; \
- ;; \
- .fframe IA64_SWITCH_STACK_SIZE; \
- adds sp=-IA64_SWITCH_STACK_SIZE,sp; \
- mov.ret.sptk b7=r28,1f; \
- SWITCH_STACK_SAVES(0); \
- br.cond.sptk.many save_switch_stack; \
-1:
-
-#define DO_LOAD_SWITCH_STACK \
- movl r28=1f; \
- ;; \
- invala; \
- mov.ret.sptk b7=r28,1f; \
- br.cond.sptk.many load_switch_stack; \
-1: .restore sp; \
- adds sp=IA64_SWITCH_STACK_SIZE,sp
diff --git a/xen/arch/ia64/linux-xen/head.S b/xen/arch/ia64/linux-xen/head.S
deleted file mode 100644
index c1fa5ed21d..0000000000
--- a/xen/arch/ia64/linux-xen/head.S
+++ /dev/null
@@ -1,1298 +0,0 @@
-/*
- * Here is where the ball gets rolling as far as the kernel is concerned.
- * When control is transferred to _start, the bootloader has already
- * loaded us to the correct address. All that's left to do here is
- * to set up the kernel's global pointer and jump to the kernel
- * entry point.
- *
- * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999 Intel Corp.
- * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com>
- * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
- * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com>
- * -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
- * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
- * Support for CPU Hotplug
- */
-
-#include <linux/config.h>
-
-#include <asm/asmmacro.h>
-#include <asm/fpu.h>
-#include <asm/kregs.h>
-#include <asm/mmu_context.h>
-#include <asm/offsets.h>
-#include <asm/pal.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/mca_asm.h>
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define SAL_PSR_BITS_TO_SET \
- (IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL)
-
-#define SAVE_FROM_REG(src, ptr, dest) \
- mov dest=src;; \
- st8 [ptr]=dest,0x08
-
-#define RESTORE_REG(reg, ptr, _tmp) \
- ld8 _tmp=[ptr],0x08;; \
- mov reg=_tmp
-
-#define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\
- mov ar.lc=IA64_NUM_DBG_REGS-1;; \
- mov _idx=0;; \
-1: \
- SAVE_FROM_REG(_breg[_idx], ptr, _dest);; \
- add _idx=1,_idx;; \
- br.cloop.sptk.many 1b
-
-#define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\
- mov ar.lc=IA64_NUM_DBG_REGS-1;; \
- mov _idx=0;; \
-_lbl: RESTORE_REG(_breg[_idx], ptr, _tmp);; \
- add _idx=1, _idx;; \
- br.cloop.sptk.many _lbl
-
-#define SAVE_ONE_RR(num, _reg, _tmp) \
- movl _tmp=(num<<61);; \
- mov _reg=rr[_tmp]
-
-#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
- SAVE_ONE_RR(0,_r0, _tmp);; \
- SAVE_ONE_RR(1,_r1, _tmp);; \
- SAVE_ONE_RR(2,_r2, _tmp);; \
- SAVE_ONE_RR(3,_r3, _tmp);; \
- SAVE_ONE_RR(4,_r4, _tmp);; \
- SAVE_ONE_RR(5,_r5, _tmp);; \
- SAVE_ONE_RR(6,_r6, _tmp);; \
- SAVE_ONE_RR(7,_r7, _tmp);;
-
-#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
- st8 [ptr]=_r0, 8;; \
- st8 [ptr]=_r1, 8;; \
- st8 [ptr]=_r2, 8;; \
- st8 [ptr]=_r3, 8;; \
- st8 [ptr]=_r4, 8;; \
- st8 [ptr]=_r5, 8;; \
- st8 [ptr]=_r6, 8;; \
- st8 [ptr]=_r7, 8;;
-
-#define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp) \
- mov ar.lc=0x08-1;; \
- movl _idx1=0x00;; \
-RestRR: \
- dep.z _idx2=_idx1,61,3;; \
- ld8 _tmp=[ptr],8;; \
- mov rr[_idx2]=_tmp;; \
- srlz.d;; \
- add _idx1=1,_idx1;; \
- br.cloop.sptk.few RestRR
-
-#define SET_AREA_FOR_BOOTING_CPU(reg1, reg2) \
- movl reg1=sal_state_for_booting_cpu;; \
- ld8 reg2=[reg1];;
-
-/*
- * Adjust region registers saved before starting to save
- * break regs and the rest of the state that needs to be preserved.
- */
-#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred) \
- SAVE_FROM_REG(b0,_reg1,_reg2);; \
- SAVE_FROM_REG(b1,_reg1,_reg2);; \
- SAVE_FROM_REG(b2,_reg1,_reg2);; \
- SAVE_FROM_REG(b3,_reg1,_reg2);; \
- SAVE_FROM_REG(b4,_reg1,_reg2);; \
- SAVE_FROM_REG(b5,_reg1,_reg2);; \
- st8 [_reg1]=r1,0x08;; \
- st8 [_reg1]=r12,0x08;; \
- st8 [_reg1]=r13,0x08;; \
- SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);; \
- SAVE_FROM_REG(ar.pfs,_reg1,_reg2);; \
- SAVE_FROM_REG(ar.rnat,_reg1,_reg2);; \
- SAVE_FROM_REG(ar.unat,_reg1,_reg2);; \
- SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.dcr,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.iva,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.pta,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.itv,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.pmv,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);; \
- SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);; \
- st8 [_reg1]=r4,0x08;; \
- st8 [_reg1]=r5,0x08;; \
- st8 [_reg1]=r6,0x08;; \
- st8 [_reg1]=r7,0x08;; \
- st8 [_reg1]=_pred,0x08;; \
- SAVE_FROM_REG(ar.lc, _reg1, _reg2);; \
- stf.spill.nta [_reg1]=f2,16;; \
- stf.spill.nta [_reg1]=f3,16;; \
- stf.spill.nta [_reg1]=f4,16;; \
- stf.spill.nta [_reg1]=f5,16;; \
- stf.spill.nta [_reg1]=f16,16;; \
- stf.spill.nta [_reg1]=f17,16;; \
- stf.spill.nta [_reg1]=f18,16;; \
- stf.spill.nta [_reg1]=f19,16;; \
- stf.spill.nta [_reg1]=f20,16;; \
- stf.spill.nta [_reg1]=f21,16;; \
- stf.spill.nta [_reg1]=f22,16;; \
- stf.spill.nta [_reg1]=f23,16;; \
- stf.spill.nta [_reg1]=f24,16;; \
- stf.spill.nta [_reg1]=f25,16;; \
- stf.spill.nta [_reg1]=f26,16;; \
- stf.spill.nta [_reg1]=f27,16;; \
- stf.spill.nta [_reg1]=f28,16;; \
- stf.spill.nta [_reg1]=f29,16;; \
- stf.spill.nta [_reg1]=f30,16;; \
- stf.spill.nta [_reg1]=f31,16;;
-
-#else
-#define SET_AREA_FOR_BOOTING_CPU(a1, a2)
-#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2, a3)
-#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
-#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
-#endif
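
SAVE_FROM_REG and RESTORE_REG walk the save area with ia64 post-increment
addressing: st8 [ptr]=v,0x08 stores v and then advances ptr by 8, and ld8
v=[ptr],0x08 does the mirror-image load. A hedged C rendering of that
cursor pattern (helper names are made up for illustration):

    #include <stdint.h>

    /* Store through the cursor, then advance it by one 8-byte slot. */
    static void save64(uint64_t **cur, uint64_t v)  { *(*cur)++ = v; }
    static uint64_t restore64(uint64_t **cur)       { return *(*cur)++; }

    int main(void)
    {
        uint64_t area[4], *p = area;
        save64(&p, 1);
        save64(&p, 2);                 /* p now points past two slots */
        p = area;
        return (int)(restore64(&p) + restore64(&p)) - 3;   /* exits 0 */
    }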
-
-#ifdef XEN
-#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
- movl _tmp1=(num << 61);; \
- movl _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
- mov rr[_tmp1]=_tmp2
-#else
-#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
- movl _tmp1=(num << 61);; \
- mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
- mov rr[_tmp1]=_tmp2
-#endif
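
Both SET_ONE_RR variants build the same region register value: the region
id from bit 8 upward, log2 of the preferred page size in bits 7:2, and the
VHPT-walker enable in bit 0, written to rr[num << 61] because the top three
bits of the indexing address select the region. A hedged C sketch of that
packing (field positions per the IA-64 region register layout; the rid
value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* rid in bits 8+, page size (log2) in bits 7:2, VHPT enable in bit 0 */
    static uint64_t make_rr(uint64_t rid, unsigned pgshift, unsigned vhpt)
    {
        return (rid << 8) | ((uint64_t)pgshift << 2) | (vhpt & 1);
    }

    int main(void)
    {
        uint64_t region = 5, index = region << 61;  /* top 3 bits pick the rr */
        printf("rr[%#llx] = %#llx\n",
               (unsigned long long)index,
               (unsigned long long)make_rr(0x123, 14 /* 16KB pages */, 1));
        return 0;
    }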
-
- .section __special_page_section,"ax"
-
- .global empty_zero_page
-empty_zero_page:
- .skip PAGE_SIZE
-
-#ifndef XEN
- .global swapper_pg_dir
-swapper_pg_dir:
- .skip PAGE_SIZE
-#endif
-
-#if defined(XEN) && defined(CONFIG_VIRTUAL_FRAME_TABLE)
- .global frametable_pg_dir
-frametable_pg_dir:
- .skip PAGE_SIZE
-#endif
-
- .rodata
-halt_msg:
- stringz "Halting kernel\n"
-
- .text
-
- .global start_ap
-
- /*
- * Start the kernel. When the bootloader passes control to _start(), r28
- * points to the address of the boot parameter area. Execution reaches
- * here in physical mode.
- */
-GLOBAL_ENTRY(_start)
-start_ap:
- .prologue
- .save rp, r0 // terminate unwind chain with a NULL rp
- .body
-
- rsm psr.i | psr.ic
- ;;
- srlz.i
- ;;
- /*
-	 * Save the region registers and the predicates before they get clobbered
- */
- SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15);
- mov r25=pr;;
-
- /*
- * Initialize kernel region registers:
- * rr[0]: VHPT enabled, page size = PAGE_SHIFT
- * rr[1]: VHPT enabled, page size = PAGE_SHIFT
- * rr[2]: VHPT enabled, page size = PAGE_SHIFT
- * rr[3]: VHPT enabled, page size = PAGE_SHIFT
- * rr[4]: VHPT enabled, page size = PAGE_SHIFT
- * rr[5]: VHPT enabled, page size = PAGE_SHIFT
- * rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
- * rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
- * We initialize all of them to prevent inadvertently assuming
- * something about the state of address translation early in boot.
- */
- SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);;
- SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);;
- /*
- * Now pin mappings into the TLB for kernel text and data
- */
- mov r18=KERNEL_TR_PAGE_SHIFT<<2
- movl r17=KERNEL_START
- ;;
- mov cr.itir=r18
- mov cr.ifa=r17
- mov r16=IA64_TR_KERNEL
- mov r3=ip
- movl r18=PAGE_KERNEL
- ;;
- dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
- ;;
- or r18=r2,r18
- ;;
- srlz.i
- ;;
- itr.i itr[r16]=r18
- ;;
- itr.d dtr[r16]=r18
- ;;
- srlz.i
-
- /*
- * Switch into virtual mode:
- */
-#ifdef XEN
- movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
- |IA64_PSR_DI|IA64_PSR_AC)
-#else
- movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
- |IA64_PSR_DI)
-#endif
- ;;
- mov cr.ipsr=r16
- movl r17=1f
- ;;
- mov cr.iip=r17
- mov cr.ifs=r0
- ;;
- rfi
- ;;
-1: // now we are in virtual mode
-
- SET_AREA_FOR_BOOTING_CPU(r2, r16);
-
- STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15);
- SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25)
- ;;
-
- // set IVT entry point---can't access I/O ports without it
- movl r3=ia64_ivt
- ;;
- mov cr.iva=r3
- movl r2=FPSR_DEFAULT
- ;;
- srlz.i
- movl gp=__gp
- ;;
- mov ar.fpsr=r2
- ;;
-
-#define isAP p2 // are we an Application Processor?
-#define isBP p3 // are we the Bootstrap Processor?
-
-#ifdef XEN
-# define init_task init_task_mem
-#endif
-
-#ifdef CONFIG_SMP
- /*
- * Find the init_task for the currently booting CPU. At poweron, and in
- * UP mode, task_for_booting_cpu is NULL.
- */
- movl r3=task_for_booting_cpu
- ;;
- ld8 r3=[r3]
- movl r2=init_task
- ;;
- cmp.eq isBP,isAP=r3,r0
- ;;
-(isAP) mov r2=r3
-#else
- movl r2=init_task
- cmp.eq isBP,isAP=r0,r0
-#endif
- ;;
- tpa r3=r2 // r3 == phys addr of task struct
- mov r16=-1
-#ifndef XEN
-(isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
-#endif
-
- // load mapping for stack (virtaddr in r2, physaddr in r3)
- rsm psr.ic
- movl r17=PAGE_KERNEL
- ;;
- srlz.d
- dep r18=0,r3,0,12
- ;;
- or r18=r17,r18
-#ifdef XEN
- dep r2=-1,r3,60,4 // IMVA of task
-#else
- dep r2=-1,r3,61,3 // IMVA of task
-#endif
- ;;
- mov r17=rr[r2]
- shr.u r16=r3,IA64_GRANULE_SHIFT
- ;;
- dep r17=0,r17,8,24
- ;;
- mov cr.itir=r17
- mov cr.ifa=r2
-
- mov r19=IA64_TR_CURRENT_STACK
- ;;
- itr.d dtr[r19]=r18
- ;;
- ssm psr.ic
- srlz.d
- ;;
-
-.load_current:
- // load the "current" pointer (r13) and ar.k6 with the current task
- mov IA64_KR(CURRENT)=r2 // virtual address
- mov IA64_KR(CURRENT_STACK)=r16
- mov r13=r2
- /*
- * Reserve space at the top of the stack for "struct pt_regs". Kernel
- * threads don't store interesting values in that structure, but the space
- * still needs to be there because time-critical stuff such as the context
- * switching can be implemented more efficiently (for example, __switch_to()
- * always sets the psr.dfh bit of the task it is switching to).
- */
-
- addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
- addl r2=IA64_RBS_OFFSET,r2 // initialize the RSE
- mov ar.rsc=0 // place RSE in enforced lazy mode
- ;;
- loadrs // clear the dirty partition
-#ifdef XEN
-(isAP) br.few 2f
- movl r19=__phys_per_cpu_start
- mov r18=PERCPU_PAGE_SIZE
-#ifndef CONFIG_SMP
- add r19=r19,r18
- ;;
-#else
- movl r20=__cpu0_per_cpu
- ;;
- shr.u r18=r18,3
-1:
- ld8 r21=[r19],8 ;;
- st8[r20]=r21,8
- adds r18=-1,r18
- ;;
- cmp4.lt p7,p6=0,r18
-(p7) br.cond.dptk.few 1b
- ;;
-#endif
- movl r18=__per_cpu_offset
- movl r19=__cpu0_per_cpu
- movl r20=__per_cpu_start
- ;;
- sub r20=r19,r20
- ;;
- st8 [r18]=r20
-2:
-#endif
- ;;
- mov ar.bspstore=r2 // establish the new RSE stack
- ;;
- mov ar.rsc=0x3 // place RSE in eager mode
-
-#ifdef XEN
-(isBP) dep r28=-1,r28,60,4 // make address virtual
-#else
-(isBP) dep r28=-1,r28,61,3 // make address virtual
-#endif
-(isBP) movl r2=ia64_boot_param
- ;;
-(isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
-
-#ifdef CONFIG_SMP
-(isAP) br.call.sptk.many rp=start_secondary
-.ret0:
-(isAP) br.cond.sptk self
-#endif
-
- // This is executed by the bootstrap processor (bsp) only:
-
-#ifdef CONFIG_IA64_FW_EMU
- // initialize PAL & SAL emulator:
- br.call.sptk.many rp=sys_fw_init
-.ret1:
-#endif
- br.call.sptk.many rp=start_kernel
-.ret2: addl r3=@ltoff(halt_msg),gp
- ;;
- alloc r2=ar.pfs,8,0,2,0
- ;;
- ld8 out0=[r3]
- br.call.sptk.many b0=console_print
-
-self: hint @pause
-#ifdef XEN
- ;;
- br.sptk.many self // endless loop
- ;;
-#else
- br.sptk.many self // endless loop
-#endif
-END(_start)
-
-GLOBAL_ENTRY(ia64_save_debug_regs)
- alloc r16=ar.pfs,1,0,0,0
- mov r20=ar.lc // preserve ar.lc
- mov ar.lc=IA64_NUM_DBG_REGS-1
- mov r18=0
- add r19=IA64_NUM_DBG_REGS*8,in0
- ;;
-1: mov r16=dbr[r18]
-#ifdef CONFIG_ITANIUM
- ;;
- srlz.d
-#endif
- mov r17=ibr[r18]
- add r18=1,r18
- ;;
- st8.nta [in0]=r16,8
- st8.nta [r19]=r17,8
- br.cloop.sptk.many 1b
- ;;
- mov ar.lc=r20 // restore ar.lc
- br.ret.sptk.many rp
-END(ia64_save_debug_regs)
-
-GLOBAL_ENTRY(ia64_load_debug_regs)
- alloc r16=ar.pfs,1,0,0,0
- lfetch.nta [in0]
- mov r20=ar.lc // preserve ar.lc
- add r19=IA64_NUM_DBG_REGS*8,in0
- mov ar.lc=IA64_NUM_DBG_REGS-1
- mov r18=-1
- ;;
-1: ld8.nta r16=[in0],8
- ld8.nta r17=[r19],8
- add r18=1,r18
- ;;
- mov dbr[r18]=r16
-#ifdef CONFIG_ITANIUM
- ;;
- srlz.d // Errata 132 (NoFix status)
-#endif
- mov ibr[r18]=r17
- br.cloop.sptk.many 1b
- ;;
- mov ar.lc=r20 // restore ar.lc
- br.ret.sptk.many rp
-END(ia64_load_debug_regs)
-
-GLOBAL_ENTRY(__ia64_save_fpu)
- alloc r2=ar.pfs,1,4,0,0
- adds loc0=96*16-16,in0
- adds loc1=96*16-16-128,in0
- ;;
- stf.spill.nta [loc0]=f127,-256
- stf.spill.nta [loc1]=f119,-256
- ;;
- stf.spill.nta [loc0]=f111,-256
- stf.spill.nta [loc1]=f103,-256
- ;;
- stf.spill.nta [loc0]=f95,-256
- stf.spill.nta [loc1]=f87,-256
- ;;
- stf.spill.nta [loc0]=f79,-256
- stf.spill.nta [loc1]=f71,-256
- ;;
- stf.spill.nta [loc0]=f63,-256
- stf.spill.nta [loc1]=f55,-256
- adds loc2=96*16-32,in0
- ;;
- stf.spill.nta [loc0]=f47,-256
- stf.spill.nta [loc1]=f39,-256
- adds loc3=96*16-32-128,in0
- ;;
- stf.spill.nta [loc2]=f126,-256
- stf.spill.nta [loc3]=f118,-256
- ;;
- stf.spill.nta [loc2]=f110,-256
- stf.spill.nta [loc3]=f102,-256
- ;;
- stf.spill.nta [loc2]=f94,-256
- stf.spill.nta [loc3]=f86,-256
- ;;
- stf.spill.nta [loc2]=f78,-256
- stf.spill.nta [loc3]=f70,-256
- ;;
- stf.spill.nta [loc2]=f62,-256
- stf.spill.nta [loc3]=f54,-256
- adds loc0=96*16-48,in0
- ;;
- stf.spill.nta [loc2]=f46,-256
- stf.spill.nta [loc3]=f38,-256
- adds loc1=96*16-48-128,in0
- ;;
- stf.spill.nta [loc0]=f125,-256
- stf.spill.nta [loc1]=f117,-256
- ;;
- stf.spill.nta [loc0]=f109,-256
- stf.spill.nta [loc1]=f101,-256
- ;;
- stf.spill.nta [loc0]=f93,-256
- stf.spill.nta [loc1]=f85,-256
- ;;
- stf.spill.nta [loc0]=f77,-256
- stf.spill.nta [loc1]=f69,-256
- ;;
- stf.spill.nta [loc0]=f61,-256
- stf.spill.nta [loc1]=f53,-256
- adds loc2=96*16-64,in0
- ;;
- stf.spill.nta [loc0]=f45,-256
- stf.spill.nta [loc1]=f37,-256
- adds loc3=96*16-64-128,in0
- ;;
- stf.spill.nta [loc2]=f124,-256
- stf.spill.nta [loc3]=f116,-256
- ;;
- stf.spill.nta [loc2]=f108,-256
- stf.spill.nta [loc3]=f100,-256
- ;;
- stf.spill.nta [loc2]=f92,-256
- stf.spill.nta [loc3]=f84,-256
- ;;
- stf.spill.nta [loc2]=f76,-256
- stf.spill.nta [loc3]=f68,-256
- ;;
- stf.spill.nta [loc2]=f60,-256
- stf.spill.nta [loc3]=f52,-256
- adds loc0=96*16-80,in0
- ;;
- stf.spill.nta [loc2]=f44,-256
- stf.spill.nta [loc3]=f36,-256
- adds loc1=96*16-80-128,in0
- ;;
- stf.spill.nta [loc0]=f123,-256
- stf.spill.nta [loc1]=f115,-256
- ;;
- stf.spill.nta [loc0]=f107,-256
- stf.spill.nta [loc1]=f99,-256
- ;;
- stf.spill.nta [loc0]=f91,-256
- stf.spill.nta [loc1]=f83,-256
- ;;
- stf.spill.nta [loc0]=f75,-256
- stf.spill.nta [loc1]=f67,-256
- ;;
- stf.spill.nta [loc0]=f59,-256
- stf.spill.nta [loc1]=f51,-256
- adds loc2=96*16-96,in0
- ;;
- stf.spill.nta [loc0]=f43,-256
- stf.spill.nta [loc1]=f35,-256
- adds loc3=96*16-96-128,in0
- ;;
- stf.spill.nta [loc2]=f122,-256
- stf.spill.nta [loc3]=f114,-256
- ;;
- stf.spill.nta [loc2]=f106,-256
- stf.spill.nta [loc3]=f98,-256
- ;;
- stf.spill.nta [loc2]=f90,-256
- stf.spill.nta [loc3]=f82,-256
- ;;
- stf.spill.nta [loc2]=f74,-256
- stf.spill.nta [loc3]=f66,-256
- ;;
- stf.spill.nta [loc2]=f58,-256
- stf.spill.nta [loc3]=f50,-256
- adds loc0=96*16-112,in0
- ;;
- stf.spill.nta [loc2]=f42,-256
- stf.spill.nta [loc3]=f34,-256
- adds loc1=96*16-112-128,in0
- ;;
- stf.spill.nta [loc0]=f121,-256
- stf.spill.nta [loc1]=f113,-256
- ;;
- stf.spill.nta [loc0]=f105,-256
- stf.spill.nta [loc1]=f97,-256
- ;;
- stf.spill.nta [loc0]=f89,-256
- stf.spill.nta [loc1]=f81,-256
- ;;
- stf.spill.nta [loc0]=f73,-256
- stf.spill.nta [loc1]=f65,-256
- ;;
- stf.spill.nta [loc0]=f57,-256
- stf.spill.nta [loc1]=f49,-256
- adds loc2=96*16-128,in0
- ;;
- stf.spill.nta [loc0]=f41,-256
- stf.spill.nta [loc1]=f33,-256
- adds loc3=96*16-128-128,in0
- ;;
- stf.spill.nta [loc2]=f120,-256
- stf.spill.nta [loc3]=f112,-256
- ;;
- stf.spill.nta [loc2]=f104,-256
- stf.spill.nta [loc3]=f96,-256
- ;;
- stf.spill.nta [loc2]=f88,-256
- stf.spill.nta [loc3]=f80,-256
- ;;
- stf.spill.nta [loc2]=f72,-256
- stf.spill.nta [loc3]=f64,-256
- ;;
- stf.spill.nta [loc2]=f56,-256
- stf.spill.nta [loc3]=f48,-256
- ;;
- stf.spill.nta [loc2]=f40
- stf.spill.nta [loc3]=f32
- br.ret.sptk.many rp
-END(__ia64_save_fpu)
-
-GLOBAL_ENTRY(__ia64_load_fpu)
- alloc r2=ar.pfs,1,2,0,0
- adds r3=128,in0
- adds r14=256,in0
- adds r15=384,in0
- mov loc0=512
- mov loc1=-1024+16
- ;;
- ldf.fill.nta f32=[in0],loc0
- ldf.fill.nta f40=[ r3],loc0
- ldf.fill.nta f48=[r14],loc0
- ldf.fill.nta f56=[r15],loc0
- ;;
- ldf.fill.nta f64=[in0],loc0
- ldf.fill.nta f72=[ r3],loc0
- ldf.fill.nta f80=[r14],loc0
- ldf.fill.nta f88=[r15],loc0
- ;;
- ldf.fill.nta f96=[in0],loc1
- ldf.fill.nta f104=[ r3],loc1
- ldf.fill.nta f112=[r14],loc1
- ldf.fill.nta f120=[r15],loc1
- ;;
- ldf.fill.nta f33=[in0],loc0
- ldf.fill.nta f41=[ r3],loc0
- ldf.fill.nta f49=[r14],loc0
- ldf.fill.nta f57=[r15],loc0
- ;;
- ldf.fill.nta f65=[in0],loc0
- ldf.fill.nta f73=[ r3],loc0
- ldf.fill.nta f81=[r14],loc0
- ldf.fill.nta f89=[r15],loc0
- ;;
- ldf.fill.nta f97=[in0],loc1
- ldf.fill.nta f105=[ r3],loc1
- ldf.fill.nta f113=[r14],loc1
- ldf.fill.nta f121=[r15],loc1
- ;;
- ldf.fill.nta f34=[in0],loc0
- ldf.fill.nta f42=[ r3],loc0
- ldf.fill.nta f50=[r14],loc0
- ldf.fill.nta f58=[r15],loc0
- ;;
- ldf.fill.nta f66=[in0],loc0
- ldf.fill.nta f74=[ r3],loc0
- ldf.fill.nta f82=[r14],loc0
- ldf.fill.nta f90=[r15],loc0
- ;;
- ldf.fill.nta f98=[in0],loc1
- ldf.fill.nta f106=[ r3],loc1
- ldf.fill.nta f114=[r14],loc1
- ldf.fill.nta f122=[r15],loc1
- ;;
- ldf.fill.nta f35=[in0],loc0
- ldf.fill.nta f43=[ r3],loc0
- ldf.fill.nta f51=[r14],loc0
- ldf.fill.nta f59=[r15],loc0
- ;;
- ldf.fill.nta f67=[in0],loc0
- ldf.fill.nta f75=[ r3],loc0
- ldf.fill.nta f83=[r14],loc0
- ldf.fill.nta f91=[r15],loc0
- ;;
- ldf.fill.nta f99=[in0],loc1
- ldf.fill.nta f107=[ r3],loc1
- ldf.fill.nta f115=[r14],loc1
- ldf.fill.nta f123=[r15],loc1
- ;;
- ldf.fill.nta f36=[in0],loc0
- ldf.fill.nta f44=[ r3],loc0
- ldf.fill.nta f52=[r14],loc0
- ldf.fill.nta f60=[r15],loc0
- ;;
- ldf.fill.nta f68=[in0],loc0
- ldf.fill.nta f76=[ r3],loc0
- ldf.fill.nta f84=[r14],loc0
- ldf.fill.nta f92=[r15],loc0
- ;;
- ldf.fill.nta f100=[in0],loc1
- ldf.fill.nta f108=[ r3],loc1
- ldf.fill.nta f116=[r14],loc1
- ldf.fill.nta f124=[r15],loc1
- ;;
- ldf.fill.nta f37=[in0],loc0
- ldf.fill.nta f45=[ r3],loc0
- ldf.fill.nta f53=[r14],loc0
- ldf.fill.nta f61=[r15],loc0
- ;;
- ldf.fill.nta f69=[in0],loc0
- ldf.fill.nta f77=[ r3],loc0
- ldf.fill.nta f85=[r14],loc0
- ldf.fill.nta f93=[r15],loc0
- ;;
- ldf.fill.nta f101=[in0],loc1
- ldf.fill.nta f109=[ r3],loc1
- ldf.fill.nta f117=[r14],loc1
- ldf.fill.nta f125=[r15],loc1
- ;;
- ldf.fill.nta f38 =[in0],loc0
- ldf.fill.nta f46 =[ r3],loc0
- ldf.fill.nta f54 =[r14],loc0
- ldf.fill.nta f62 =[r15],loc0
- ;;
- ldf.fill.nta f70 =[in0],loc0
- ldf.fill.nta f78 =[ r3],loc0
- ldf.fill.nta f86 =[r14],loc0
- ldf.fill.nta f94 =[r15],loc0
- ;;
- ldf.fill.nta f102=[in0],loc1
- ldf.fill.nta f110=[ r3],loc1
- ldf.fill.nta f118=[r14],loc1
- ldf.fill.nta f126=[r15],loc1
- ;;
- ldf.fill.nta f39 =[in0],loc0
- ldf.fill.nta f47 =[ r3],loc0
- ldf.fill.nta f55 =[r14],loc0
- ldf.fill.nta f63 =[r15],loc0
- ;;
- ldf.fill.nta f71 =[in0],loc0
- ldf.fill.nta f79 =[ r3],loc0
- ldf.fill.nta f87 =[r14],loc0
- ldf.fill.nta f95 =[r15],loc0
- ;;
- ldf.fill.nta f103=[in0]
- ldf.fill.nta f111=[ r3]
- ldf.fill.nta f119=[r14]
- ldf.fill.nta f127=[r15]
- br.ret.sptk.many rp
-END(__ia64_load_fpu)
-
-GLOBAL_ENTRY(__ia64_init_fpu)
- stf.spill [sp]=f0 // M3
- mov f32=f0 // F
- nop.b 0
-
- ldfps f33,f34=[sp] // M0
- ldfps f35,f36=[sp] // M1
- mov f37=f0 // F
- ;;
-
- setf.s f38=r0 // M2
- setf.s f39=r0 // M3
- mov f40=f0 // F
-
- ldfps f41,f42=[sp] // M0
- ldfps f43,f44=[sp] // M1
- mov f45=f0 // F
-
- setf.s f46=r0 // M2
- setf.s f47=r0 // M3
- mov f48=f0 // F
-
- ldfps f49,f50=[sp] // M0
- ldfps f51,f52=[sp] // M1
- mov f53=f0 // F
-
- setf.s f54=r0 // M2
- setf.s f55=r0 // M3
- mov f56=f0 // F
-
- ldfps f57,f58=[sp] // M0
- ldfps f59,f60=[sp] // M1
- mov f61=f0 // F
-
- setf.s f62=r0 // M2
- setf.s f63=r0 // M3
- mov f64=f0 // F
-
- ldfps f65,f66=[sp] // M0
- ldfps f67,f68=[sp] // M1
- mov f69=f0 // F
-
- setf.s f70=r0 // M2
- setf.s f71=r0 // M3
- mov f72=f0 // F
-
- ldfps f73,f74=[sp] // M0
- ldfps f75,f76=[sp] // M1
- mov f77=f0 // F
-
- setf.s f78=r0 // M2
- setf.s f79=r0 // M3
- mov f80=f0 // F
-
- ldfps f81,f82=[sp] // M0
- ldfps f83,f84=[sp] // M1
- mov f85=f0 // F
-
- setf.s f86=r0 // M2
- setf.s f87=r0 // M3
- mov f88=f0 // F
-
- /*
- * When the instructions are cached, it would be faster to initialize
-	 * the remaining registers with simple mov instructions (F-unit).
- * This gets the time down to ~29 cycles. However, this would use up
- * 33 bundles, whereas continuing with the above pattern yields
- * 10 bundles and ~30 cycles.
- */
-
- ldfps f89,f90=[sp] // M0
- ldfps f91,f92=[sp] // M1
- mov f93=f0 // F
-
- setf.s f94=r0 // M2
- setf.s f95=r0 // M3
- mov f96=f0 // F
-
- ldfps f97,f98=[sp] // M0
- ldfps f99,f100=[sp] // M1
- mov f101=f0 // F
-
- setf.s f102=r0 // M2
- setf.s f103=r0 // M3
- mov f104=f0 // F
-
- ldfps f105,f106=[sp] // M0
- ldfps f107,f108=[sp] // M1
- mov f109=f0 // F
-
- setf.s f110=r0 // M2
- setf.s f111=r0 // M3
- mov f112=f0 // F
-
- ldfps f113,f114=[sp] // M0
- ldfps f115,f116=[sp] // M1
- mov f117=f0 // F
-
- setf.s f118=r0 // M2
- setf.s f119=r0 // M3
- mov f120=f0 // F
-
- ldfps f121,f122=[sp] // M0
- ldfps f123,f124=[sp] // M1
- mov f125=f0 // F
-
- setf.s f126=r0 // M2
- setf.s f127=r0 // M3
- br.ret.sptk.many rp // F
-END(__ia64_init_fpu)
-
-/*
- * Switch execution mode from virtual to physical
- *
- * Inputs:
- * r16 = new psr to establish
- * Output:
- * r19 = old virtual address of ar.bsp
- * r20 = old virtual address of sp
- *
- * Note: RSE must already be in enforced lazy mode
- */
-GLOBAL_ENTRY(ia64_switch_mode_phys)
- {
- alloc r2=ar.pfs,0,0,0,0
- rsm psr.i | psr.ic // disable interrupts and interrupt collection
- mov r15=ip
- }
- ;;
- {
- flushrs // must be first insn in group
- srlz.i
- }
- ;;
- mov cr.ipsr=r16 // set new PSR
- add r3=1f-ia64_switch_mode_phys,r15
-
- mov r19=ar.bsp
- mov r20=sp
- mov r14=rp // get return address into a general register
- ;;
-
- // going to physical mode, use tpa to translate virt->phys
- tpa r17=r19
- tpa r3=r3
- tpa sp=sp
- tpa r14=r14
- ;;
-
- mov r18=ar.rnat // save ar.rnat
- mov ar.bspstore=r17 // this steps on ar.rnat
- mov cr.iip=r3
- mov cr.ifs=r0
- ;;
- mov ar.rnat=r18 // restore ar.rnat
- rfi // must be last insn in group
- ;;
-1: mov rp=r14
- br.ret.sptk.many rp
-END(ia64_switch_mode_phys)
-
-/*
- * Switch execution mode from physical to virtual
- *
- * Inputs:
- * r16 = new psr to establish
- * r19 = new bspstore to establish
- * r20 = new sp to establish
- *
- * Note: RSE must already be in enforced lazy mode
- */
-GLOBAL_ENTRY(ia64_switch_mode_virt)
- {
- alloc r2=ar.pfs,0,0,0,0
- rsm psr.i | psr.ic // disable interrupts and interrupt collection
- mov r15=ip
- }
- ;;
- {
- flushrs // must be first insn in group
- srlz.i
- }
- ;;
- mov cr.ipsr=r16 // set new PSR
- add r3=1f-ia64_switch_mode_virt,r15
-
- mov r14=rp // get return address into a general register
- ;;
-
- // going to virtual
- // - for code addresses, set upper bits of addr to KERNEL_START
- // - for stack addresses, copy from input argument
- movl r18=KERNEL_START
- dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
- dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
- mov sp=r20
- ;;
- or r3=r3,r18
- or r14=r14,r18
- ;;
-
- mov r18=ar.rnat // save ar.rnat
- mov ar.bspstore=r19 // this steps on ar.rnat
- mov cr.iip=r3
- mov cr.ifs=r0
- ;;
- mov ar.rnat=r18 // restore ar.rnat
- rfi // must be last insn in group
- ;;
-1: mov rp=r14
- br.ret.sptk.many rp
-END(ia64_switch_mode_virt)
-
-GLOBAL_ENTRY(ia64_delay_loop)
- .prologue
-{ nop 0 // work around GAS unwind info generation bug...
- .save ar.lc,r2
- mov r2=ar.lc
- .body
- ;;
- mov ar.lc=r32
-}
- ;;
- // force loop to be 32-byte aligned (GAS bug means we cannot use .align
- // inside function body without corrupting unwind info).
-{ nop 0 }
-1: br.cloop.sptk.few 1b
- ;;
- mov ar.lc=r2
- br.ret.sptk.many rp
-END(ia64_delay_loop)
-
-#ifndef XEN
-/*
- * Return a CPU-local timestamp in nano-seconds. This timestamp is
- * NOT synchronized across CPUs; its return value must never be
- * compared against the values returned on another CPU. The usage in
- * kernel/sched.c ensures that.
- *
- * The return-value of sched_clock() is NOT supposed to wrap-around.
- * If it did, it would cause some scheduling hiccups (at the worst).
- * Fortunately, with a 64-bit cycle-counter ticking at 100GHz, even
- * that would happen only once every 5+ years.
- *
- * The code below basically calculates:
- *
- * (ia64_get_itc() * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT
- *
- * except that the multiplication and the shift are done with 128-bit
- * intermediate precision so that we can produce a full 64-bit result.
- */
-GLOBAL_ENTRY(sched_clock)
-#ifdef XEN
- movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
-#else
- addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
-#endif
- mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
- ;;
- ldf8 f8=[r8]
- ;;
- setf.sig f9=r9 // certain to stall, so issue it _after_ ldf8...
- ;;
- xmpy.lu f10=f9,f8 // calculate low 64 bits of 128-bit product (4 cyc)
- xmpy.hu f11=f9,f8 // calculate high 64 bits of 128-bit product
- ;;
- getf.sig r8=f10 // (5 cyc)
- getf.sig r9=f11
- ;;
- shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
- br.ret.sptk.many rp
-END(sched_clock)
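
The xmpy.lu/xmpy.hu pair above forms the full 128-bit product of the cycle
counter and the scale factor, and shrp then extracts bits [shift+63:shift],
i.e. (itc * nsec_per_cyc) >> shift with no loss of high bits. A hedged C
equivalent, assuming a compiler with the GCC/Clang __int128 extension:

    #include <stdint.h>
    #include <stdio.h>

    /* Cycle count -> nanoseconds with 128-bit intermediate precision,
     * mirroring the xmpy.lu/xmpy.hu + shrp sequence. */
    static uint64_t cycles_to_ns(uint64_t itc, uint64_t nsec_per_cyc,
                                 unsigned shift)
    {
        unsigned __int128 prod = (unsigned __int128)itc * nsec_per_cyc;
        return (uint64_t)(prod >> shift);
    }

    int main(void)
    {
        /* e.g. a 1 GHz counter with the scale factor fixed-point at 2^20 */
        printf("%llu ns\n", (unsigned long long)
               cycles_to_ns(1000000000ull, 1ull << 20, 20));
        return 0;
    }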
-
-GLOBAL_ENTRY(start_kernel_thread)
- .prologue
- .save rp, r0 // this is the end of the call-chain
- .body
- alloc r2 = ar.pfs, 0, 0, 2, 0
- mov out0 = r9
- mov out1 = r11;;
- br.call.sptk.many rp = kernel_thread_helper;;
- mov out0 = r8
- br.call.sptk.many rp = sys_exit;;
-1: br.sptk.few 1b // not reached
-END(start_kernel_thread)
-#endif /* XEN */
-
-#ifdef CONFIG_IA64_BRL_EMU
-
-/*
- * Assembly routines used by brl_emu.c to set preserved register state.
- */
-
-#define SET_REG(reg) \
- GLOBAL_ENTRY(ia64_set_##reg); \
- alloc r16=ar.pfs,1,0,0,0; \
- mov reg=r32; \
- ;; \
- br.ret.sptk.many rp; \
- END(ia64_set_##reg)
-
-SET_REG(b1);
-SET_REG(b2);
-SET_REG(b3);
-SET_REG(b4);
-SET_REG(b5);
-
-#endif /* CONFIG_IA64_BRL_EMU */
-
-#ifdef CONFIG_SMP
- /*
- * This routine handles spinlock contention. It uses a non-standard calling
- * convention to avoid converting leaf routines into interior routines. Because
- * of this special convention, there are several restrictions:
- *
- * - do not use gp relative variables, this code is called from the kernel
- * and from modules, r1 is undefined.
- * - do not use stacked registers, the caller owns them.
- * - do not use the scratch stack space, the caller owns it.
- * - do not use any registers other than the ones listed below
- *
- * Inputs:
- * ar.pfs - saved CFM of caller
- * ar.ccv - 0 (and available for use)
- * r27 - flags from spin_lock_irqsave or 0. Must be preserved.
- * r28 - available for use.
- * r29 - available for use.
- * r30 - available for use.
- * r31 - address of lock, available for use.
- * b6 - return address
- * p14 - available for use.
- * p15 - used to track flag status.
- *
- * If you patch this code to use more registers, do not forget to update
- * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
- */
-
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-
-GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
- .prologue
- .save ar.pfs, r0 // this code effectively has a zero frame size
- .save rp, r28
- .body
- nop 0
- tbit.nz p15,p0=r27,IA64_PSR_I_BIT
- .restore sp // pop existing prologue after next insn
- mov b6 = r28
- .prologue
- .save ar.pfs, r0
- .altrp b6
- .body
- ;;
-(p15) ssm psr.i // reenable interrupts if they were on
- // DavidM says that srlz.d is slow and is not required in this case
-.wait:
- // exponential backoff, kdb, lockmeter etc. go in here
- hint @pause
- ld4 r30=[r31] // don't use ld4.bias; if it's contended, we won't write the word
- nop 0
- ;;
- cmp4.ne p14,p0=r30,r0
-(p14) br.cond.sptk.few .wait
-(p15) rsm psr.i // disable interrupts if we reenabled them
- br.cond.sptk.few b6 // lock is now free, try to acquire
- .global ia64_spinlock_contention_pre3_4_end // for kernprof
-ia64_spinlock_contention_pre3_4_end:
-END(ia64_spinlock_contention_pre3_4)
-
-#else
-
-GLOBAL_ENTRY(ia64_spinlock_contention)
- .prologue
- .altrp b6
- .body
- tbit.nz p15,p0=r27,IA64_PSR_I_BIT
- ;;
-.wait:
-(p15) ssm psr.i // reenable interrupts if they were on
- // DavidM says that srlz.d is slow and is not required in this case
-.wait2:
- // exponential backoff, kdb, lockmeter etc. go in here
- hint @pause
- ld4 r30=[r31] // don't use ld4.bias; if it's contended, we won't write the word
- ;;
- cmp4.ne p14,p0=r30,r0
- mov r30 = 1
-(p14) br.cond.sptk.few .wait2
-(p15) rsm psr.i // disable interrupts if we reenabled them
- ;;
- cmpxchg4.acq r30=[r31], r30, ar.ccv
- ;;
- cmp4.ne p14,p0=r0,r30
-(p14) br.cond.sptk.few .wait
-
- br.ret.sptk.many b6 // lock is now taken
-END(ia64_spinlock_contention)
-
-#endif
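
Both contention routines are a test-and-test-and-set wait: spin on plain
loads (ld4 without .bias, keeping the cache line in shared state) and only
attempt cmpxchg4.acq once the word reads zero, re-enabling interrupts for
the duration of the wait when the caller had them on. A hedged C sketch of
that shape using GCC atomic builtins, not the kernel's actual spinlock
interface:

    #include <stdint.h>

    /* Spin read-only until the lock word is 0, then try to claim it with
     * an acquire compare-and-swap; on failure, resume the read-only wait. */
    static void contend(volatile uint32_t *lock)
    {
        for (;;) {
            while (*lock != 0)
                ;                      /* hint @pause / backoff would go here */
            uint32_t expected = 0;
            if (__atomic_compare_exchange_n(lock, &expected, 1u, 0,
                                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                return;                /* lock is now taken */
        }
    }

    int main(void)
    {
        volatile uint32_t lock = 0;
        contend(&lock);                /* uncontended: acquires immediately */
        return (int)lock - 1;          /* exits 0 on success */
    }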
-
-#ifdef CONFIG_HOTPLUG_CPU
-GLOBAL_ENTRY(ia64_jump_to_sal)
- alloc r16=ar.pfs,1,0,0,0;;
- rsm psr.i | psr.ic
-{
- flushrs
- srlz.i
-}
- tpa r25=in0
- movl r18=tlb_purge_done;;
- DATA_VA_TO_PA(r18);;
- mov b1=r18 // Return location
- movl r18=ia64_do_tlb_purge;;
- DATA_VA_TO_PA(r18);;
- mov b2=r18 // doing tlb_flush work
- mov ar.rsc=0 // Put RSE in enforced lazy, LE mode
- movl r17=1f;;
- DATA_VA_TO_PA(r17);;
- mov cr.iip=r17
- movl r16=SAL_PSR_BITS_TO_SET;;
- mov cr.ipsr=r16
- mov cr.ifs=r0;;
- rfi;;
-1:
- /*
- * Invalidate all TLB data/inst
- */
- br.sptk.many b2;; // jump to tlb purge code
-
-tlb_purge_done:
- RESTORE_REGION_REGS(r25, r17,r18,r19);;
- RESTORE_REG(b0, r25, r17);;
- RESTORE_REG(b1, r25, r17);;
- RESTORE_REG(b2, r25, r17);;
- RESTORE_REG(b3, r25, r17);;
- RESTORE_REG(b4, r25, r17);;
- RESTORE_REG(b5, r25, r17);;
- ld8 r1=[r25],0x08;;
- ld8 r12=[r25],0x08;;
- ld8 r13=[r25],0x08;;
- RESTORE_REG(ar.fpsr, r25, r17);;
- RESTORE_REG(ar.pfs, r25, r17);;
- RESTORE_REG(ar.rnat, r25, r17);;
- RESTORE_REG(ar.unat, r25, r17);;
- RESTORE_REG(ar.bspstore, r25, r17);;
- RESTORE_REG(cr.dcr, r25, r17);;
- RESTORE_REG(cr.iva, r25, r17);;
- RESTORE_REG(cr.pta, r25, r17);;
-#ifdef XEN
- dv_serialize_instruction
-#endif
- RESTORE_REG(cr.itv, r25, r17);;
- RESTORE_REG(cr.pmv, r25, r17);;
- RESTORE_REG(cr.cmcv, r25, r17);;
- RESTORE_REG(cr.lrr0, r25, r17);;
- RESTORE_REG(cr.lrr1, r25, r17);;
- ld8 r4=[r25],0x08;;
- ld8 r5=[r25],0x08;;
- ld8 r6=[r25],0x08;;
- ld8 r7=[r25],0x08;;
- ld8 r17=[r25],0x08;;
- mov pr=r17,-1;;
- RESTORE_REG(ar.lc, r25, r17);;
- /*
- * Now Restore floating point regs
- */
- ldf.fill.nta f2=[r25],16;;
- ldf.fill.nta f3=[r25],16;;
- ldf.fill.nta f4=[r25],16;;
- ldf.fill.nta f5=[r25],16;;
- ldf.fill.nta f16=[r25],16;;
- ldf.fill.nta f17=[r25],16;;
- ldf.fill.nta f18=[r25],16;;
- ldf.fill.nta f19=[r25],16;;
- ldf.fill.nta f20=[r25],16;;
- ldf.fill.nta f21=[r25],16;;
- ldf.fill.nta f22=[r25],16;;
- ldf.fill.nta f23=[r25],16;;
- ldf.fill.nta f24=[r25],16;;
- ldf.fill.nta f25=[r25],16;;
- ldf.fill.nta f26=[r25],16;;
- ldf.fill.nta f27=[r25],16;;
- ldf.fill.nta f28=[r25],16;;
- ldf.fill.nta f29=[r25],16;;
- ldf.fill.nta f30=[r25],16;;
- ldf.fill.nta f31=[r25],16;;
-
- /*
-	 * Now that we have done all the register restores,
-	 * we are ready for the big DIVE to SAL Land
- */
- ssm psr.ic;;
- srlz.d;;
- br.ret.sptk.many b0;;
-END(ia64_jump_to_sal)
-#endif /* CONFIG_HOTPLUG_CPU */
-
-#endif /* CONFIG_SMP */
diff --git a/xen/arch/ia64/linux-xen/hpsim_ssc.h b/xen/arch/ia64/linux-xen/hpsim_ssc.h
deleted file mode 100644
index c3f36e758b..0000000000
--- a/xen/arch/ia64/linux-xen/hpsim_ssc.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Platform dependent support for HP simulator.
- *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
- */
-#ifndef _IA64_PLATFORM_HPSIM_SSC_H
-#define _IA64_PLATFORM_HPSIM_SSC_H
-
-/* Simulator system calls: */
-
-#define SSC_CONSOLE_INIT 20
-#define SSC_GETCHAR 21
-#define SSC_PUTCHAR 31
-#define SSC_CONNECT_INTERRUPT 58
-#define SSC_GENERATE_INTERRUPT 59
-#define SSC_SET_PERIODIC_INTERRUPT 60
-#define SSC_GET_RTC 65
-#define SSC_EXIT 66
-#define SSC_LOAD_SYMBOLS 69
-#define SSC_GET_TOD 74
-#define SSC_CTL_TRACE 76
-
-#define SSC_NETDEV_PROBE 100
-#define SSC_NETDEV_SEND 101
-#define SSC_NETDEV_RECV 102
-#define SSC_NETDEV_ATTACH 103
-#define SSC_NETDEV_DETACH 104
-
-/*
- * Simulator system call.
- */
-extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
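
All SSC services go through this one trampoline, with the service number
passed last. A hedged usage sketch; the host-side stub below only fakes
SSC_PUTCHAR so the example runs anywhere, whereas the real ia64_ssc traps
into the HP simulator:

    #include <stdio.h>

    #define SSC_PUTCHAR 31

    /* Stand-in for the assembly trampoline, for illustration only. */
    static long ia64_ssc_demo(long a0, long a1, long a2, long a3, int nr)
    {
        (void)a1; (void)a2; (void)a3;
        if (nr == SSC_PUTCHAR)
            putchar((int)a0);
        return 0;
    }

    int main(void)
    {
        const char *s = "hi\n";
        while (*s)
            ia64_ssc_demo(*s++, 0, 0, 0, SSC_PUTCHAR);
        return 0;
    }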
-
-#ifdef XEN
-/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
- * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
-#define SSC_OPEN 50
-#define SSC_CLOSE 51
-#define SSC_READ 52
-#define SSC_WRITE 53
-#define SSC_GET_COMPLETION 54
-#define SSC_WAIT_COMPLETION 55
-
-#define SSC_WRITE_ACCESS 2
-#define SSC_READ_ACCESS 1
-
-struct ssc_disk_req {
- unsigned long addr;
- unsigned long len;
-};
-#endif
-
-#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
diff --git a/xen/arch/ia64/linux-xen/iosapic.c b/xen/arch/ia64/linux-xen/iosapic.c
deleted file mode 100644
index 3f4419df03..0000000000
--- a/xen/arch/ia64/linux-xen/iosapic.c
+++ /dev/null
@@ -1,1288 +0,0 @@
-/*
- * I/O SAPIC support.
- *
- * Copyright (C) 1999 Intel Corp.
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 2000-2002 J.I. Lee <jung-ik.lee@intel.com>
- * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
- *
- * 00/04/19 D. Mosberger Rewritten to mirror more closely the x86 I/O APIC code.
- * In particular, we now have separate handlers for edge
- * and level triggered interrupts.
- * 00/10/27 Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector allocation
- * PCI to vector mapping, shared PCI interrupts.
- * 00/10/27 D. Mosberger Document things a bit more to make them more understandable.
- * Clean up much of the old IOSAPIC cruft.
- * 01/07/27 J.I. Lee PCI irq routing, Platform/Legacy interrupts and fixes for
- * ACPI S5(SoftOff) support.
- * 02/01/23 J.I. Lee iosapic pgm fixes for PCI irq routing from _PRT
- * 02/01/07 E. Focht <efocht@ess.nec.de> Redirectable interrupt vectors in
- * iosapic_set_affinity(), initializations for
- * /proc/irq/#/smp_affinity
- * 02/04/02 P. Diefenbaugh Cleaned up ACPI PCI IRQ routing.
- * 02/04/18 J.I. Lee bug fix in iosapic_init_pci_irq
- * 02/04/30 J.I. Lee bug fix in find_iosapic to fix ACPI PCI IRQ to IOSAPIC mapping
- * error
- * 02/07/29 T. Kochi Allocate interrupt vectors dynamically
- * 02/08/04 T. Kochi Cleaned up terminology (irq, global system interrupt, vector, etc.)
- * 02/09/20 D. Mosberger Simplified by taking advantage of ACPI's pci_irq code.
- * 03/02/19 B. Helgaas Make pcat_compat system-wide, not per-IOSAPIC.
- * Remove iosapic_address & gsi_base from external interfaces.
- * Rationalize __init/__devinit attributes.
- * 04/12/04 Ashok Raj <ashok.raj@intel.com> Intel Corporation 2004
- * Updated to work with irq migration necessary for CPU Hotplug
- */
-/*
- * Here is what the interrupt logic between a PCI device and the kernel looks like:
- *
- * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC, INTD). The
- * device is uniquely identified by its bus--, and slot-number (the function
- * number does not matter here because all functions share the same interrupt
- * lines).
- *
- * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC controller.
- * Multiple interrupt lines may have to share the same IOSAPIC pin (if they're level
- * triggered and use the same polarity). Each interrupt line has a unique Global
- * System Interrupt (GSI) number which can be calculated as the sum of the controller's
- * base GSI number and the IOSAPIC pin number to which the line connects.
- *
- * (3) The IOSAPIC uses internal routing table entries (RTEs) to map the IOSAPIC pin
- * into the IA-64 interrupt vector. This interrupt vector is then sent to the CPU.
- *
- * (4) The kernel recognizes an interrupt as an IRQ. The IRQ interface is used as
- * architecture-independent interrupt handling mechanism in Linux. As an
- * IRQ is a number, we have to have IA-64 interrupt vector number <-> IRQ number
- * mapping. On smaller systems, we use one-to-one mapping between IA-64 vector and
- * IRQ. A platform can implement platform_irq_to_vector(irq) and
- * platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
- * Please see also include/asm-ia64/hw_irq.h for those APIs.
- *
- * To sum up, there are three levels of mappings involved:
- *
- * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
- *
- * Note: The term "IRQ" is loosely used everywhere in the Linux kernel to describe interrupts.
- * Now we use "IRQ" only for Linux IRQ's. ISA IRQ (isa_irq) is the only exception in this
- * source code.
- */
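
To make step (2) concrete: a line wired to pin 3 of an IOSAPIC whose base
GSI is 48 gets GSI 48 + 3 = 51, and mapping a GSI back to its controller is
a range test over each [gsi_base, gsi_base + num_rte) window, which is what
find_iosapic() below does with an unsigned subtraction. A small hedged
sketch of that test with made-up controller windows:

    #include <stdio.h>

    struct toy_iosapic { unsigned gsi_base, num_rte; };

    /* (gsi - base) < num_rte in unsigned arithmetic also rejects
     * gsi < base, via wrap-around. */
    static int toy_find(const struct toy_iosapic *s, int n, unsigned gsi)
    {
        for (int i = 0; i < n; i++)
            if (gsi - s[i].gsi_base < s[i].num_rte)
                return i;
        return -1;
    }

    int main(void)
    {
        struct toy_iosapic s[] = { { 0, 24 }, { 48, 16 } };
        printf("GSI 51 -> IOSAPIC %d\n", toy_find(s, 2, 51));  /* pin 3 of #1 */
        return 0;
    }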
-#include <linux/config.h>
-
-#include <linux/acpi.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#ifdef XEN
-#include <xen/errno.h>
-#endif
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/string.h>
-#include <linux/bootmem.h>
-
-#include <asm/delay.h>
-#include <asm/hw_irq.h>
-#include <asm/io.h>
-#include <asm/iosapic.h>
-#include <asm/machvec.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-
-#ifdef XEN
-static inline int iosapic_irq_to_vector (int irq)
-{
- return irq;
-}
-
-#undef irq_to_vector
-#define irq_to_vector(irq) iosapic_irq_to_vector(irq)
-#define AUTO_ASSIGN AUTO_ASSIGN_IRQ
-#endif
-
-#undef DEBUG_INTERRUPT_ROUTING
-
-#ifdef DEBUG_INTERRUPT_ROUTING
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-#define NR_PREALLOCATE_RTE_ENTRIES (PAGE_SIZE / sizeof(struct iosapic_rte_info))
-#define RTE_PREALLOCATED (1)
-
-static DEFINE_SPINLOCK(iosapic_lock);
-
-/* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */
-
-struct iosapic_rte_info {
- struct list_head rte_list; /* node in list of RTEs sharing the same vector */
- char __iomem *addr; /* base address of IOSAPIC */
- unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */
- char rte_index; /* IOSAPIC RTE index */
- int refcnt; /* reference counter */
- unsigned int flags; /* flags */
-} ____cacheline_aligned;
-
-static struct iosapic_intr_info {
- struct list_head rtes; /* RTEs using this vector (empty => not an IOSAPIC interrupt) */
-	int count; /* # of RTEs that share this vector */
- u32 low32; /* current value of low word of Redirection table entry */
- unsigned int dest; /* destination CPU physical ID */
- unsigned char dmode : 3; /* delivery mode (see iosapic.h) */
- unsigned char polarity: 1; /* interrupt polarity (see iosapic.h) */
- unsigned char trigger : 1; /* trigger mode (see iosapic.h) */
-} iosapic_intr_info[IA64_NUM_VECTORS];
-
-#ifndef XEN
-static struct iosapic {
- char __iomem *addr; /* base address of IOSAPIC */
- unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */
- unsigned short num_rte; /* number of RTE in this IOSAPIC */
- int rtes_inuse; /* # of RTEs in use on this IOSAPIC */
-#ifdef CONFIG_NUMA
- unsigned short node; /* numa node association via pxm */
-#endif
-} iosapic_lists[NR_IOSAPICS];
-#else
-struct iosapic iosapic_lists[NR_IOSAPICS];
-#endif
-
-static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */
-
-static int iosapic_kmalloc_ok;
-static LIST_HEAD(free_rte_list);
-
-/*
- * Find an IOSAPIC associated with a GSI
- */
-static inline int
-find_iosapic (unsigned int gsi)
-{
- int i;
-
- for (i = 0; i < NR_IOSAPICS; i++) {
- if ((unsigned) (gsi - iosapic_lists[i].gsi_base) < iosapic_lists[i].num_rte)
- return i;
- }
-
- return -1;
-}
-
-static inline int
-_gsi_to_vector (unsigned int gsi)
-{
- struct iosapic_intr_info *info;
- struct iosapic_rte_info *rte;
-
- for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info)
- list_for_each_entry(rte, &info->rtes, rte_list)
- if (rte->gsi_base + rte->rte_index == gsi)
- return info - iosapic_intr_info;
- return -1;
-}
-
-/*
- * Translate GSI number to the corresponding IA-64 interrupt vector. If no
- * entry exists, return -1.
- */
-inline int
-gsi_to_vector (unsigned int gsi)
-{
- return _gsi_to_vector(gsi);
-}
-
-int
-gsi_to_irq (unsigned int gsi)
-{
- unsigned long flags;
- int irq;
- /*
-	 * XXX fix me: this assumes an identity mapping between IA-64 vector and Linux irq
- * numbers...
- */
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- irq = _gsi_to_vector(gsi);
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-
- return irq;
-}
-
-static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, unsigned int vec)
-{
- struct iosapic_rte_info *rte;
-
- list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
- if (rte->gsi_base + rte->rte_index == gsi)
- return rte;
- return NULL;
-}
-
-static void
-set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
-{
- unsigned long pol, trigger, dmode;
- u32 low32, high32;
- char __iomem *addr;
- int rte_index;
- char redir;
- struct iosapic_rte_info *rte;
-
- DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
-
- rte = gsi_vector_to_rte(gsi, vector);
- if (!rte)
- return; /* not an IOSAPIC interrupt */
-
- rte_index = rte->rte_index;
- addr = rte->addr;
- pol = iosapic_intr_info[vector].polarity;
- trigger = iosapic_intr_info[vector].trigger;
- dmode = iosapic_intr_info[vector].dmode;
-
- redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
-
-#ifdef CONFIG_SMP
- {
- unsigned int irq;
-
- for (irq = 0; irq < NR_IRQS; ++irq)
- if (irq_to_vector(irq) == vector) {
- set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
- break;
- }
- }
-#endif
-
- low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
- (trigger << IOSAPIC_TRIGGER_SHIFT) |
- (dmode << IOSAPIC_DELIVERY_SHIFT) |
- ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
- vector);
-
- /* dest contains both id and eid */
- high32 = (dest << IOSAPIC_DEST_SHIFT);
-
- iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
- iosapic_intr_info[vector].low32 = low32;
- iosapic_intr_info[vector].dest = dest;
-}
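
set_rte() packs the RTE low word from the vector plus the polarity, trigger,
delivery-mode and mask fields, and the high word from the destination
id/eid. A hedged sketch of the low-word packing; the shift values below are
believed to match the IOSAPIC_*_SHIFT constants but should be treated as
illustrative (the authoritative definitions live in asm/iosapic.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed RTE field positions: vector in bits 7:0, delivery mode at
     * bit 8, polarity at bit 13, trigger at bit 15, mask at bit 16. */
    enum { DELIVERY_SHIFT = 8, POLARITY_SHIFT = 13,
           TRIGGER_SHIFT = 15, MASK_SHIFT = 16 };

    static uint32_t rte_low(unsigned pol, unsigned trig, unsigned dmode,
                            unsigned mask, unsigned vector)
    {
        return (pol << POLARITY_SHIFT) | (trig << TRIGGER_SHIFT) |
               (dmode << DELIVERY_SHIFT) |
               ((mask ? 1u : 0u) << MASK_SHIFT) | (vector & 0xff);
    }

    int main(void)
    {
        printf("low32 = %#x\n", rte_low(1, 1, 0 /* fixed */, 0, 0x30));
        return 0;
    }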
-
-void
-kexec_disable_iosapic(void)
-{
- struct iosapic_intr_info *info;
- struct iosapic_rte_info *rte;
- u8 vec = 0;
- for (info = iosapic_intr_info; info <
- iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) {
- list_for_each_entry(rte, &info->rtes,
- rte_list) {
- iosapic_write(rte->addr,
- IOSAPIC_RTE_LOW(rte->rte_index),
- IOSAPIC_MASK|vec);
- iosapic_eoi(rte->addr, vec);
- }
- }
-}
-
-static void
-mask_irq (struct irq_desc *desc)
-{
- unsigned long flags;
- char __iomem *addr;
- u32 low32;
- int rte_index;
- ia64_vector vec = irq_to_vector(desc->irq);
- struct iosapic_rte_info *rte;
-
- if (list_empty(&iosapic_intr_info[vec].rtes))
- return; /* not an IOSAPIC interrupt! */
-
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- /* set only the mask bit */
- low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
- list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
- addr = rte->addr;
- rte_index = rte->rte_index;
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
- }
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-}
-
-static void
-unmask_irq (struct irq_desc *desc)
-{
- unsigned long flags;
- char __iomem *addr;
- u32 low32;
- int rte_index;
- ia64_vector vec = irq_to_vector(desc->irq);
- struct iosapic_rte_info *rte;
-
- if (list_empty(&iosapic_intr_info[vec].rtes))
- return; /* not an IOSAPIC interrupt! */
-
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
- list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
- addr = rte->addr;
- rte_index = rte->rte_index;
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
- }
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-}
-
-
-static void
-iosapic_set_affinity (struct irq_desc *desc, const cpumask_t *mask)
-{
-#ifdef CONFIG_SMP
- unsigned long flags;
- u32 high32, low32;
- int dest, rte_index;
- char __iomem *addr;
- int redir = (desc->irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
- unsigned int irq = desc->irq & ~IA64_IRQ_REDIRECTED;
- ia64_vector vec;
- struct iosapic_rte_info *rte;
-
- vec = irq_to_vector(irq);
-
- if (cpumask_empty(mask))
- return;
-
- dest = cpu_physical_id(cpumask_first(mask));
-
- if (list_empty(&iosapic_intr_info[vec].rtes))
- return; /* not an IOSAPIC interrupt */
-
- set_irq_affinity_info(irq, dest, redir);
-
- /* dest contains both id and eid */
- high32 = dest << IOSAPIC_DEST_SHIFT;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- low32 = iosapic_intr_info[vec].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
-
- if (redir)
- /* change delivery mode to lowest priority */
- low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
- else
- /* change delivery mode to fixed */
- low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
-
- iosapic_intr_info[vec].low32 = low32;
- iosapic_intr_info[vec].dest = dest;
- list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
- addr = rte->addr;
- rte_index = rte->rte_index;
- iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
- }
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-#endif
-}
-
-/*
- * Handlers for level-triggered interrupts.
- */
-
-static unsigned int
-iosapic_startup_level_irq (struct irq_desc *desc)
-{
- unmask_irq(desc);
- return 0;
-}
-
-static void
-iosapic_end_level_irq (struct irq_desc *desc)
-{
- ia64_vector vec = irq_to_vector(desc->irq);
- struct iosapic_rte_info *rte;
-
- move_irq(desc->irq);
- list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
- iosapic_eoi(rte->addr, vec);
-}
-
-#define iosapic_shutdown_level_irq mask_irq
-#define iosapic_enable_level_irq unmask_irq
-#define iosapic_disable_level_irq mask_irq
-#define iosapic_ack_level_irq irq_actor_none
-
-static hw_irq_controller irq_type_iosapic_level = {
- .typename = "IO-SAPIC-level",
- .startup = iosapic_startup_level_irq,
- .shutdown = iosapic_shutdown_level_irq,
- .enable = iosapic_enable_level_irq,
- .disable = iosapic_disable_level_irq,
- .ack = iosapic_ack_level_irq,
- .end = iosapic_end_level_irq,
- .set_affinity = iosapic_set_affinity
-};
-
-/*
- * Handlers for edge-triggered interrupts.
- */
-
-static unsigned int
-iosapic_startup_edge_irq (struct irq_desc *desc)
-{
- unmask_irq(desc);
- /*
- * IOSAPIC simply drops interrupts pended while the
- * corresponding pin was masked, so we can't know if an
- * interrupt is pending already. Let's hope not...
- */
- return 0;
-}
-
-static void
-iosapic_ack_edge_irq (struct irq_desc *desc)
-{
-	move_irq(desc->irq);
-	/*
-	 * Once IRQ_PENDING has been recorded, we can mask the
- * interrupt for real. This prevents IRQ storms from unhandled
- * devices.
- */
- if ((desc->status & (IRQ_PENDING|IRQ_DISABLED)) == (IRQ_PENDING|IRQ_DISABLED))
- mask_irq(desc);
-}
-
-#define iosapic_enable_edge_irq unmask_irq
-#define iosapic_disable_edge_irq irq_disable_none
-#define iosapic_end_edge_irq irq_actor_none
-
-static hw_irq_controller irq_type_iosapic_edge = {
- .typename = "IO-SAPIC-edge",
- .startup = iosapic_startup_edge_irq,
- .shutdown = iosapic_disable_edge_irq,
- .enable = iosapic_enable_edge_irq,
- .disable = iosapic_disable_edge_irq,
- .ack = iosapic_ack_edge_irq,
- .end = iosapic_end_edge_irq,
- .set_affinity = iosapic_set_affinity
-};
-
-unsigned int
-iosapic_version (char __iomem *addr)
-{
- /*
- * The IOSAPIC Version Register returns a 32-bit structure like:
- * {
- * unsigned int version : 8;
- * unsigned int reserved1 : 8;
- * unsigned int max_redir : 8;
- * unsigned int reserved2 : 8;
- * }
- */
- return iosapic_read(addr, IOSAPIC_VERSION);
-}
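-
-/*
- * Illustrative sketch, not part of the original file: decoding the two
- * useful fields of the value returned above.
- */
-#if 0
-	u32 ver = iosapic_version(addr);
-	unsigned int version   = ver & 0xff;		/* bits 0-7 */
-	unsigned int max_redir = (ver >> 16) & 0xff;	/* bits 16-23 */
-	/* max_redir is the highest RTE index; the pin count is max_redir + 1 */
-#endif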
-
-static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long pol)
-{
- int i, vector = -1, min_count = -1;
- struct iosapic_intr_info *info;
-
- /*
- * shared vectors for edge-triggered interrupts are not
- * supported yet
- */
- if (trigger == IOSAPIC_EDGE)
- return -1;
-
- for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) {
- info = &iosapic_intr_info[i];
- if (info->trigger == trigger && info->polarity == pol &&
- (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY)) {
- if (min_count == -1 || info->count < min_count) {
- vector = i;
- min_count = info->count;
- }
- }
- }
-
- return vector;
-}
-
-/*
- * If the given vector is already owned by another interrupt, assign a
- * new vector to that owner and make the requested vector available.
- */
-static void __init
-iosapic_reassign_vector (int vector)
-{
- int new_vector;
-
- if (!list_empty(&iosapic_intr_info[vector].rtes)) {
- new_vector = assign_irq_vector(AUTO_ASSIGN);
- if (new_vector < 0)
- panic("%s: out of interrupt vectors!\n", __FUNCTION__);
- printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector);
- memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
- sizeof(struct iosapic_intr_info));
- INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes);
- list_move(iosapic_intr_info[vector].rtes.next, &iosapic_intr_info[new_vector].rtes);
- memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
- iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
- INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
- }
-}
-
-static struct iosapic_rte_info *iosapic_alloc_rte (void)
-{
- int i;
- struct iosapic_rte_info *rte;
- int preallocated = 0;
-
- if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
-#ifdef XEN
- rte = xmalloc_bytes(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES);
-#else
- rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES);
-#endif
- if (!rte)
- return NULL;
- for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
- list_add(&rte->rte_list, &free_rte_list);
- }
-
- if (!list_empty(&free_rte_list)) {
- rte = list_entry(free_rte_list.next, struct iosapic_rte_info, rte_list);
- list_del(&rte->rte_list);
- preallocated++;
- } else {
- rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
- if (!rte)
- return NULL;
- }
-
- memset(rte, 0, sizeof(struct iosapic_rte_info));
- if (preallocated)
- rte->flags |= RTE_PREALLOCATED;
-
- return rte;
-}
-
-static void iosapic_free_rte (struct iosapic_rte_info *rte)
-{
- if (rte->flags & RTE_PREALLOCATED)
- list_add_tail(&rte->rte_list, &free_rte_list);
- else
- kfree(rte);
-}
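-
-/*
- * Usage sketch, not part of the original file: allocation and release
- * always go through the pair above. Entries carved out of the early-boot
- * pool (RTE_PREALLOCATED) are recycled onto free_rte_list rather than
- * being kfree()d, since they were never kmalloc()ed.
- */
-#if 0
-	struct iosapic_rte_info *rte = iosapic_alloc_rte();
-	if (rte) {
-		/* ... fill in rte and program the RTE ... */
-		iosapic_free_rte(rte);	/* pool entries go back on free_rte_list */
-	}
-#endif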
-
-static inline int vector_is_shared (int vector)
-{
- return (iosapic_intr_info[vector].count > 1);
-}
-
-static int
-register_intr (unsigned int gsi, int vector, unsigned char delivery,
- unsigned long polarity, unsigned long trigger)
-{
- irq_desc_t *idesc;
- hw_irq_controller *irq_type;
- int rte_index;
- int index;
- unsigned long gsi_base;
- void __iomem *iosapic_address;
- struct iosapic_rte_info *rte;
-
- index = find_iosapic(gsi);
- if (index < 0) {
- printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi);
- return -ENODEV;
- }
-
- iosapic_address = iosapic_lists[index].addr;
- gsi_base = iosapic_lists[index].gsi_base;
-
- rte = gsi_vector_to_rte(gsi, vector);
- if (!rte) {
- rte = iosapic_alloc_rte();
- if (!rte) {
- printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__);
- return -ENOMEM;
- }
-
- rte_index = gsi - gsi_base;
- rte->rte_index = rte_index;
- rte->addr = iosapic_address;
- rte->gsi_base = gsi_base;
- rte->refcnt++;
- list_add_tail(&rte->rte_list, &iosapic_intr_info[vector].rtes);
- iosapic_intr_info[vector].count++;
- iosapic_lists[index].rtes_inuse++;
- }
- else if (vector_is_shared(vector)) {
- struct iosapic_intr_info *info = &iosapic_intr_info[vector];
- if (info->trigger != trigger || info->polarity != polarity) {
- printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__);
- return -EINVAL;
- }
- }
-
- iosapic_intr_info[vector].polarity = polarity;
- iosapic_intr_info[vector].dmode = delivery;
- iosapic_intr_info[vector].trigger = trigger;
-
- if (trigger == IOSAPIC_EDGE)
- irq_type = &irq_type_iosapic_edge;
- else
- irq_type = &irq_type_iosapic_level;
-
- idesc = irq_descp(vector);
- if (idesc->handler != irq_type) {
- if (idesc->handler != &no_irq_type)
- printk(KERN_WARNING "%s: changing vector %d from %s to %s\n",
- __FUNCTION__, vector, idesc->handler->typename, irq_type->typename);
- idesc->handler = irq_type;
- }
- return 0;
-}
-
-static unsigned int
-get_target_cpu (unsigned int gsi, int vector)
-{
-#ifdef CONFIG_SMP
- static int cpu = -1;
-
- /*
- * In case of vector shared by multiple RTEs, all RTEs that
- * share the vector need to use the same destination CPU.
- */
- if (!list_empty(&iosapic_intr_info[vector].rtes))
- return iosapic_intr_info[vector].dest;
-
- /*
- * If the platform supports redirection via XTP, let it
- * distribute interrupts.
- */
- if (smp_int_redirect & SMP_IRQ_REDIRECTION)
- return cpu_physical_id(smp_processor_id());
-
- /*
- * Some interrupts (ACPI SCI, for instance) are registered
- * before the BSP is marked as online.
- */
- if (!cpu_online(smp_processor_id()))
- return cpu_physical_id(smp_processor_id());
-
-#ifdef CONFIG_NUMA
- {
- int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
- cpumask_t cpu_mask;
-
- iosapic_index = find_iosapic(gsi);
- if (iosapic_index < 0 ||
- iosapic_lists[iosapic_index].node == MAX_NUMNODES)
- goto skip_numa_setup;
-
- cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-
- for_each_cpu(numa_cpu, &cpu_mask) {
- if (!cpu_online(numa_cpu))
- cpumask_clear_cpu(numa_cpu, &cpu_mask);
- }
-
- num_cpus = cpumask_weight(&cpu_mask);
-
- if (!num_cpus)
- goto skip_numa_setup;
-
-		/* Use vector assignment to distribute across cpus in node */
- cpu_index = vector % num_cpus;
-
- for (numa_cpu = cpumask_first(&cpu_mask) ; i < cpu_index ; i++)
- numa_cpu = cpumask_next(numa_cpu, &cpu_mask);
-
- if (numa_cpu != NR_CPUS)
- return cpu_physical_id(numa_cpu);
- }
-skip_numa_setup:
-#endif
- /*
- * Otherwise, round-robin interrupt vectors across all the
- * processors. (It'd be nice if we could be smarter in the
- * case of NUMA.)
- */
- do {
- if (++cpu >= NR_CPUS)
- cpu = 0;
- } while (!cpu_online(cpu));
-
- return cpu_physical_id(cpu);
-#else
- return cpu_physical_id(smp_processor_id());
-#endif
-}
-
-/*
- * ACPI can describe IOSAPIC interrupts via static tables and namespace
- * methods. This provides an interface to register those interrupts and
- * program the IOSAPIC RTE.
- */
-int
-iosapic_register_intr (unsigned int gsi,
- unsigned long polarity, unsigned long trigger)
-{
- int vector, mask = 1, err;
- unsigned int dest;
- unsigned long flags;
- struct iosapic_rte_info *rte;
- u32 low32;
-again:
- /*
- * If this GSI has already been registered (i.e., it's a
- * shared interrupt, or we lost a race to register it),
- * don't touch the RTE.
- */
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- vector = gsi_to_vector(gsi);
- if (vector > 0) {
- rte = gsi_vector_to_rte(gsi, vector);
- rte->refcnt++;
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return vector;
- }
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-
-	/* If vectors are running out, try to find a sharable vector */
- vector = assign_irq_vector(AUTO_ASSIGN);
- if (vector < 0) {
- vector = iosapic_find_sharable_vector(trigger, polarity);
- if (vector < 0)
- return -ENOSPC;
- }
-
- spin_lock_irqsave(&irq_descp(vector)->lock, flags);
- spin_lock(&iosapic_lock);
- {
- if (gsi_to_vector(gsi) > 0) {
- if (list_empty(&iosapic_intr_info[vector].rtes))
- free_irq_vector(vector);
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
- goto again;
- }
-
- dest = get_target_cpu(gsi, vector);
- err = register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
- polarity, trigger);
- if (err < 0) {
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
- return err;
- }
-
- /*
- * If the vector is shared and already unmasked for
- * other interrupt sources, don't mask it.
- */
- low32 = iosapic_intr_info[vector].low32;
- if (vector_is_shared(vector) && !(low32 & IOSAPIC_MASK))
- mask = 0;
- set_rte(gsi, vector, dest, mask);
- }
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
-
- printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
- gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
- (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
- cpu_logical_id(dest), dest, vector);
-
- return vector;
-}
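-
-/*
- * Usage sketch, not part of the original file: how a caller (typically
- * ACPI code) would map a level-triggered, active-low GSI. IOSAPIC_LEVEL
- * and IOSAPIC_POL_LOW are assumed to be the counterparts of IOSAPIC_EDGE
- * and IOSAPIC_POL_HIGH from the matching header.
- */
-#if 0
-	int vector = iosapic_register_intr(gsi, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
-	if (vector < 0)
-		return vector;	/* no vector available, or RTE setup failed */
-#endif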
-
-void
-iosapic_unregister_intr (unsigned int gsi)
-{
- unsigned long flags;
- int irq, vector, index;
- irq_desc_t *idesc;
- u32 low32;
- unsigned long trigger, polarity;
- unsigned int dest;
- struct iosapic_rte_info *rte;
-
- /*
- * If the irq associated with the gsi is not found,
- * iosapic_unregister_intr() is unbalanced. We need to check
- * this again after getting locks.
- */
- irq = gsi_to_irq(gsi);
- if (irq < 0) {
- printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
- WARN_ON(1);
- return;
- }
- vector = irq_to_vector(irq);
-
- idesc = irq_descp(irq);
- spin_lock_irqsave(&idesc->lock, flags);
- spin_lock(&iosapic_lock);
- {
- if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) {
- printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
- WARN_ON(1);
- goto out;
- }
-
- if (--rte->refcnt > 0)
- goto out;
-
- /* Mask the interrupt */
- low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK;
- iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32);
-
- /* Remove the rte entry from the list */
- list_del(&rte->rte_list);
- iosapic_intr_info[vector].count--;
- iosapic_free_rte(rte);
- index = find_iosapic(gsi);
- iosapic_lists[index].rtes_inuse--;
- WARN_ON(iosapic_lists[index].rtes_inuse < 0);
-
- trigger = iosapic_intr_info[vector].trigger;
- polarity = iosapic_intr_info[vector].polarity;
- dest = iosapic_intr_info[vector].dest;
- printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
- gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
- (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
- cpu_logical_id(dest), dest, vector);
-
- if (list_empty(&iosapic_intr_info[vector].rtes)) {
- /* Sanity check */
- BUG_ON(iosapic_intr_info[vector].count);
-
- /* Clear the interrupt controller descriptor */
- idesc->handler = &no_irq_type;
-
- /* Clear the interrupt information */
- memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
- iosapic_intr_info[vector].low32 |= IOSAPIC_MASK;
- INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
-
- if (idesc->action) {
- printk(KERN_ERR "interrupt handlers still exist on IRQ %u\n", irq);
- WARN_ON(1);
- }
-
- /* Free the interrupt vector */
- free_irq_vector(vector);
- }
- }
- out:
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&idesc->lock, flags);
-}
-
-/*
- * ACPI calls this when it finds an entry for a platform interrupt.
- * Note that the irq_base and IOSAPIC address must be set in iosapic_init().
- */
-int __init
-iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
- int iosapic_vector, u16 eid, u16 id,
- unsigned long polarity, unsigned long trigger)
-{
- static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
- unsigned char delivery;
- int vector, mask = 0;
- unsigned int dest = ((id << 8) | eid) & 0xffff;
-
- switch (int_type) {
- case ACPI_INTERRUPT_PMI:
- vector = iosapic_vector;
- /*
-		 * since the PMI vector is allocated by firmware (ACPI), not by
-		 * the kernel, we need to make sure the vector is available
- */
- iosapic_reassign_vector(vector);
- delivery = IOSAPIC_PMI;
- break;
- case ACPI_INTERRUPT_INIT:
- vector = assign_irq_vector(AUTO_ASSIGN);
- if (vector < 0)
- panic("%s: out of interrupt vectors!\n", __FUNCTION__);
- delivery = IOSAPIC_INIT;
- break;
- case ACPI_INTERRUPT_CPEI:
- vector = IA64_CPE_VECTOR;
- delivery = IOSAPIC_LOWEST_PRIORITY;
- mask = 1;
- break;
- default:
-		printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__, int_type);
- return -1;
- }
-
- register_intr(gsi, vector, delivery, polarity, trigger);
-
- printk(KERN_INFO "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
- int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
- int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
- (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
- cpu_logical_id(dest), dest, vector);
-
- set_rte(gsi, vector, dest, mask);
- return vector;
-}
-
-
-/*
- * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
- * Note that the gsi_base and IOSAPIC address must be set in iosapic_init().
- */
-void __init
-iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
- unsigned long polarity,
- unsigned long trigger)
-{
- int vector;
- unsigned int dest = cpu_physical_id(smp_processor_id());
-
- vector = isa_irq_to_vector(isa_irq);
-
- register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
-
- DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
- isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
- polarity == IOSAPIC_POL_HIGH ? "high" : "low",
- cpu_logical_id(dest), dest, vector);
-
- set_rte(gsi, vector, dest, 1);
-}
-
-void __init
-iosapic_system_init (int system_pcat_compat)
-{
- int vector;
-
- for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) {
- iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
- INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); /* mark as unused */
- }
-
- pcat_compat = system_pcat_compat;
- if (pcat_compat) {
- /*
-		 * Disable the compatibility-mode (8259-style) interrupts; this
-		 * needs IN/OUT support enabled.
- */
- printk(KERN_INFO "%s: Disabling PC-AT compatible 8259 interrupts\n", __FUNCTION__);
- outb(0xff, 0xA1);
- outb(0xff, 0x21);
- }
-}
-
-static inline int
-iosapic_alloc (void)
-{
- int index;
-
- for (index = 0; index < NR_IOSAPICS; index++)
- if (!iosapic_lists[index].addr)
- return index;
-
- printk(KERN_WARNING "%s: failed to allocate iosapic\n", __FUNCTION__);
- return -1;
-}
-
-static inline void
-iosapic_free (int index)
-{
- memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
-}
-
-static inline int
-iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
-{
- int index;
- unsigned int gsi_end, base, end;
-
- /* check gsi range */
- gsi_end = gsi_base + ((ver >> 16) & 0xff);
- for (index = 0; index < NR_IOSAPICS; index++) {
- if (!iosapic_lists[index].addr)
- continue;
-
- base = iosapic_lists[index].gsi_base;
- end = base + iosapic_lists[index].num_rte - 1;
-
- if (gsi_base < base && gsi_end < base)
- continue;/* OK */
-
- if (gsi_base > end && gsi_end > end)
- continue; /* OK */
-
- return -EBUSY;
- }
- return 0;
-}
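-
-/*
- * Worked example, not part of the original file: an IOSAPIC at gsi_base 16
- * whose version register reports max_redir 23 covers GSIs 16..39
- * (gsi_end = 16 + 23, i.e. 24 pins). Registering a second IOSAPIC at
- * gsi_base 32 would fail both the "entirely below" and "entirely above"
- * tests against that range, so the function returns -EBUSY.
- */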
-
-int __devinit
-#ifndef XEN
-iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
-#else
-iosapic_init (unsigned long phys_addr, unsigned int gsi_base, unsigned int id)
-#endif
-{
- int num_rte, err, index;
- unsigned int isa_irq, ver;
- char __iomem *addr;
- unsigned long flags;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- addr = ioremap(phys_addr, 0);
- ver = iosapic_version(addr);
-
- if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
- iounmap(addr);
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return err;
- }
-
- /*
- * The MAX_REDIR register holds the highest input pin
- * number (starting from 0).
-		 * We add 1 so that it can be used as the number of pins (= RTEs).
- */
- num_rte = ((ver >> 16) & 0xff) + 1;
-
- index = iosapic_alloc();
- iosapic_lists[index].addr = addr;
- iosapic_lists[index].gsi_base = gsi_base;
- iosapic_lists[index].num_rte = num_rte;
-#ifdef XEN
- iosapic_lists[index].id = id;
-#endif
-#ifdef CONFIG_NUMA
- iosapic_lists[index].node = MAX_NUMNODES;
-#endif
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-
- if ((gsi_base == 0) && pcat_compat) {
- /*
- * Map the legacy ISA devices into the IOSAPIC data. Some of these may
- * get reprogrammed later on with data from the ACPI Interrupt Source
- * Override table.
- */
- for (isa_irq = 0; isa_irq < 16; ++isa_irq)
- iosapic_override_isa_irq(isa_irq, isa_irq, IOSAPIC_POL_HIGH, IOSAPIC_EDGE);
- }
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG
-int
-iosapic_remove (unsigned int gsi_base)
-{
- int index, err = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- index = find_iosapic(gsi_base);
- if (index < 0) {
- printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
- __FUNCTION__, gsi_base);
- goto out;
- }
-
- if (iosapic_lists[index].rtes_inuse) {
- err = -EBUSY;
- printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
- __FUNCTION__, gsi_base);
- goto out;
- }
-
- iounmap(iosapic_lists[index].addr);
- iosapic_free(index);
- }
- out:
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return err;
-}
-#endif /* CONFIG_HOTPLUG */
-
-#ifdef CONFIG_NUMA
-void __devinit
-map_iosapic_to_node(unsigned int gsi_base, int node)
-{
- int index;
-
- index = find_iosapic(gsi_base);
- if (index < 0) {
- printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
- __FUNCTION__, gsi_base);
- return;
- }
- iosapic_lists[index].node = node;
- return;
-}
-#endif
-
-#ifndef XEN
-static int __init iosapic_enable_kmalloc (void)
-{
- iosapic_kmalloc_ok = 1;
- return 0;
-}
-core_initcall (iosapic_enable_kmalloc);
-#endif
-
-#ifdef XEN
-/* nop for now */
-void set_irq_affinity_info(unsigned int irq, int hwid, int redir) {}
-
-static int iosapic_physbase_to_id(unsigned long physbase)
-{
- int i;
- unsigned long addr = physbase | __IA64_UNCACHED_OFFSET;
-
- for (i = 0; i < NR_IOSAPICS; i++) {
- if ((unsigned long)(iosapic_lists[i].addr) == addr)
- return i;
- }
-
- return -1;
-}
-
-int iosapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval)
-{
- int id;
- unsigned long flags;
-
- if ((id = (iosapic_physbase_to_id(physbase))) < 0)
- return id;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- *pval = iosapic_read(iosapic_lists[id].addr, reg);
- spin_unlock_irqrestore(&iosapic_lock, flags);
-
- return 0;
-}
-
-int iosapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
-{
-	int id, xen_vec;
-	unsigned int gsi, vec, dest, high32;
- char rte_index;
- struct iosapic *ios;
- struct iosapic_intr_info *info;
- struct rte_entry rte;
- unsigned long flags;
-
- if ((id = (iosapic_physbase_to_id(physbase))) < 0)
- return -EINVAL;
- ios = &iosapic_lists[id];
-
-	/* Only handle writes to the low 32-bit half of an RTE */
- if ((reg < 0x10) || (reg & 1))
- return 0;
-
- rte.val = val;
- rte_index = IOSAPIC_RTEINDEX(reg);
- vec = rte.lo.vector;
-#if 0
- /* Take PMI/NMI/INIT/EXTINT handled by xen */
- if (rte.delivery_mode > IOSAPIC_LOWEST_PRIORITY) {
- printk("Attempt to write IOSAPIC dest mode owned by xen!\n");
- printk("IOSAPIC/PIN = (%d/%d), lo = 0x%x\n",
- id, rte_index, val);
- return -EINVAL;
- }
-#endif
-
- /* Sanity check. Vector should be allocated before this update */
-	if ((rte_index >= ios->num_rte) ||
- ((vec > IA64_FIRST_DEVICE_VECTOR) &&
- (vec < IA64_LAST_DEVICE_VECTOR) &&
- (!test_bit(vec - IA64_FIRST_DEVICE_VECTOR, ia64_vector_mask))))
- return -EINVAL;
-
- gsi = ios->gsi_base + rte_index;
- xen_vec = gsi_to_vector(gsi);
- if (xen_vec >= 0 && test_bit(xen_vec, ia64_xen_vector)) {
-		printk("WARN: GSI %u in use by Xen.\n", gsi);
- return -EINVAL;
- }
- info = &iosapic_intr_info[vec];
- spin_lock_irqsave(&irq_descp(vec)->lock, flags);
- spin_lock(&iosapic_lock);
- if (!gsi_vector_to_rte(gsi, vec)) {
- register_intr(gsi, vec, IOSAPIC_LOWEST_PRIORITY,
- rte.lo.polarity, rte.lo.trigger);
- } else if (vector_is_shared(vec)) {
- if ((info->trigger != rte.lo.trigger) ||
- (info->polarity != rte.lo.polarity)) {
- printk("WARN: can't override shared interrupt vec\n");
- printk("IOSAPIC/PIN = (%d/%d), ori = 0x%x, new = 0x%x\n",
- id, rte_index, info->low32, rte.val);
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vec)->lock, flags);
- return -EINVAL;
- }
-
- /* If the vector is shared and already unmasked for other
- * interrupt sources, don't mask it.
- *
-	 * The same check may also apply to a single GSI pin shared by
-	 * devices belonging to different domains; decide how to handle
-	 * that case when the need actually arises.
- */
- if (!(info->low32 & IOSAPIC_MASK))
- rte.lo.mask = 0;
- }
-
- /* time to update physical RTE */
- dest = cpu_physical_id(smp_processor_id());
- high32 = (dest << IOSAPIC_DEST_SHIFT);
- iosapic_write(iosapic_lists[id].addr, reg + 1, high32);
- iosapic_write(iosapic_lists[id].addr, reg, rte.val);
- info->low32 = rte.val;
- info->dest = dest;
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vec)->lock, flags);
- return 0;
-}
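-
-/*
- * Illustrative note, not part of the original file: RTEs start at register
- * index 0x10 and each one occupies two 32-bit registers, the low word at
- * the even index and the high (destination) word at the odd one; hence the
- * "(reg < 0x10) || (reg & 1)" filter above, with IOSAPIC_RTEINDEX(reg)
- * essentially computing (reg - 0x10) >> 1.
- */
-#if 0
-	unsigned int reg_lo = 0x10 + 2 * rte_index;	/* low 32 bits */
-	unsigned int reg_hi = reg_lo + 1;		/* high 32 bits */
-#endif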
-
-/* for vtd interrupt remapping. xen/drivers/vtd/intremap.c */
-int iosapic_get_nr_iosapics(void)
-{
- int index;
-
- for (index = NR_IOSAPICS - 1; index >= 0; index--) {
- if (iosapic_lists[index].addr)
- break;
- }
-
- return index + 1;
-}
-
-int iosapic_get_nr_pins(int index)
-{
- return iosapic_lists[index].num_rte;
-}
-#endif /* XEN */
diff --git a/xen/arch/ia64/linux-xen/irq_ia64.c b/xen/arch/ia64/linux-xen/irq_ia64.c
deleted file mode 100644
index abb895357b..0000000000
--- a/xen/arch/ia64/linux-xen/irq_ia64.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * linux/arch/ia64/kernel/irq.c
- *
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 6/10/99: Updated to bring in sync with x86 version to facilitate
- * support for SMP and different interrupt controllers.
- *
- * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
- * PCI to vector allocation routine.
- * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
- * Added CPU Hotplug handling for IPF.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-
-#include <linux/jiffies.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel_stat.h>
-#include <linux/slab.h>
-#include <linux/ptrace.h>
-#include <linux/random.h> /* for rand_initialize_irq() */
-#include <linux/signal.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/threads.h>
-#include <linux/bitops.h>
-
-#include <asm/delay.h>
-#include <asm/intrinsics.h>
-#include <asm/io.h>
-#include <asm/hw_irq.h>
-#include <asm/machvec.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-
-#ifdef XEN
-#include <xen/perfc.h>
-#endif
-
-#ifdef CONFIG_PERFMON
-# include <asm/perfmon.h>
-#endif
-
-#define IRQ_DEBUG 0
-
-/* default base addr of IPI table */
-void __iomem *ipi_base_addr = ((void __iomem *)
- (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
-
-/*
- * Legacy IRQ to IA-64 vector translation table.
- */
-__u8 isa_irq_to_vector_map[16] = {
- /* 8259 IRQ translation, first 16 entries */
- 0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
- 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
-};
-EXPORT_SYMBOL(isa_irq_to_vector_map);
-
-#ifdef XEN
-unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
-#else
-static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
-#endif
-
-int
-assign_irq_vector (int irq)
-{
- int pos, vector;
- again:
- pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
- vector = IA64_FIRST_DEVICE_VECTOR + pos;
- if (vector > IA64_LAST_DEVICE_VECTOR)
- return -ENOSPC;
- if (test_and_set_bit(pos, ia64_vector_mask))
- goto again;
- return vector;
-}
-
-void
-free_irq_vector (int vector)
-{
- int pos;
-
- if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
- return;
-
- pos = vector - IA64_FIRST_DEVICE_VECTOR;
- if (!test_and_clear_bit(pos, ia64_vector_mask))
- printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
-}
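-
-/*
- * Usage sketch, not part of the original file: the two helpers above form
- * a simple bitmap allocator over the device-vector range.
- */
-#if 0
-	int vector = assign_irq_vector(AUTO_ASSIGN);
-	if (vector >= 0) {
-		/* ... attach a handler to the vector ... */
-		free_irq_vector(vector);	/* clears the bitmap bit again */
-	}
-#endif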
-
-#ifdef CONFIG_SMP
-# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
-#else
-# define IS_RESCHEDULE(vec) (0)
-#endif
-/*
- * That's where the IVT branches when we get an external
- * interrupt. This branches to the correct hardware IRQ handler via
- * function ptr.
- */
-void
-ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
-{
- unsigned long saved_tpr;
-
-#ifdef XEN
- perfc_incr(irqs);
-#endif
-#if IRQ_DEBUG
-#ifdef XEN
- xen_debug_irq(vector, regs);
-#endif
- {
- unsigned long bsp, sp;
-
- /*
- * Note: if the interrupt happened while executing in
- * the context switch routine (ia64_switch_to), we may
- * get a spurious stack overflow here. This is
- * because the register and the memory stack are not
- * switched atomically.
- */
- bsp = ia64_getreg(_IA64_REG_AR_BSP);
- sp = ia64_getreg(_IA64_REG_SP);
-
- if ((sp - bsp) < 1024) {
- static unsigned char count;
- static long last_time;
-
- if (jiffies - last_time > 5*HZ)
- count = 0;
- if (++count < 5) {
- last_time = jiffies;
- printk("ia64_handle_irq: DANGER: less than "
- "1KB of free stack space!!\n"
- "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
- }
- }
- }
-#endif /* IRQ_DEBUG */
-
- /*
- * Always set TPR to limit maximum interrupt nesting depth to
- * 16 (without this, it would be ~240, which could easily lead
- * to kernel stack overflows).
- */
- irq_enter();
- saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
- ia64_srlz_d();
- while (vector != IA64_SPURIOUS_INT_VECTOR) {
- if (!IS_RESCHEDULE(vector)) {
- ia64_setreg(_IA64_REG_CR_TPR, vector);
- ia64_srlz_d();
-
- __do_IRQ(local_vector_to_irq(vector), regs);
-
- /*
- * Disable interrupts and send EOI:
- */
- local_irq_disable();
- ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
- }
- ia64_eoi();
- vector = ia64_get_ivr();
- }
- /*
- * This must be done *after* the ia64_eoi(). For example, the keyboard softirq
- * handler needs to be able to wait for further keyboard interrupts, which can't
- * come through until ia64_eoi() has been done.
- */
- irq_exit();
-}
-
-#ifndef XEN
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * This function emulates interrupt processing when a CPU is about to be
- * brought down.
- */
-void ia64_process_pending_intr(void)
-{
- ia64_vector vector;
- unsigned long saved_tpr;
- extern unsigned int vectors_in_migration[NR_IRQS];
-
- vector = ia64_get_ivr();
-
- irq_enter();
- saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
- ia64_srlz_d();
-
- /*
- * Perform normal interrupt style processing
- */
- while (vector != IA64_SPURIOUS_INT_VECTOR) {
- if (!IS_RESCHEDULE(vector)) {
- ia64_setreg(_IA64_REG_CR_TPR, vector);
- ia64_srlz_d();
-
- /*
-			 * Now call the normal interrupt dispatch path as it would
-			 * have been invoked from a real interrupt handler, passing
-			 * NULL for pt_regs. This could probably share code with
-			 * ia64_handle_irq().
- */
- vectors_in_migration[local_vector_to_irq(vector)]=0;
- __do_IRQ(local_vector_to_irq(vector), NULL);
-
- /*
- * Disable interrupts and send EOI
- */
- local_irq_disable();
- ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
- }
- ia64_eoi();
- vector = ia64_get_ivr();
- }
- irq_exit();
-}
-#endif
-#endif
-
-
-#ifdef CONFIG_SMP
-extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
-
-static struct irqaction __read_mostly ipi_irqaction = {
- .handler = handle_IPI,
-#ifndef XEN
- .flags = SA_INTERRUPT,
-#endif
- .name = "IPI"
-};
-#endif
-
-static hw_irq_controller irq_type_ia64_lsapic = {
- .typename = "LSAPIC",
- .startup = irq_startup_none,
- .shutdown = irq_shutdown_none,
- .enable = irq_enable_none,
- .disable = irq_disable_none,
- .ack = irq_actor_none,
- .end = irq_actor_none
-};
-
-void
-register_percpu_irq (ia64_vector vec, struct irqaction *action)
-{
- irq_desc_t *desc;
-#ifndef XEN
- unsigned int irq;
-
- for (irq = 0; irq < NR_IRQS; ++irq)
- if (irq_to_vector(irq) == vec) {
- desc = irq_descp(irq);
- desc->status |= IRQ_PER_CPU;
- desc->handler = &irq_type_ia64_lsapic;
- if (action)
- setup_irq(irq, action);
- }
-#else
- desc = irq_descp(vec);
- desc->status |= IRQ_PER_CPU;
- desc->handler = &irq_type_ia64_lsapic;
- if (action)
- setup_vector(vec, action);
-#endif
-}
-
-#ifdef XEN
-int __init request_irq_vector(unsigned int vector,
- void (*handler)(int, void *, struct cpu_user_regs *),
- unsigned long irqflags, const char * devname, void *dev_id)
-{
- struct irqaction * action;
- int retval;
-
- /*
- * Sanity-check: shared interrupts must pass in a real dev-ID,
- * otherwise we'll have trouble later trying to figure out
- * which interrupt is which (messes up the interrupt freeing logic etc).
-	 */
- if (vector >= NR_VECTORS)
- return -EINVAL;
- if (!handler)
- return -EINVAL;
-
- action = xmalloc(struct irqaction);
- if (!action)
- return -ENOMEM;
-
- action->handler = handler;
- action->name = devname;
- action->dev_id = dev_id;
-
- retval = setup_vector(vector, action);
- if (retval)
- xfree(action);
-
- return retval;
-}
-#endif
-
-void __init
-init_IRQ (void)
-{
-#ifdef XEN
- BUG_ON(init_irq_data());
-#endif
- register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
-#ifdef CONFIG_SMP
- register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
-#endif
-#ifdef CONFIG_PERFMON
- pfm_init_percpu();
-#endif
- platform_irq_init();
-}
-
-void
-ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
-{
- void __iomem *ipi_addr;
- unsigned long ipi_data;
- unsigned long phys_cpu_id;
-
-#ifdef CONFIG_SMP
- phys_cpu_id = cpu_physical_id(cpu);
-#else
- phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
-#endif
-
- /*
-	 * The CPU number is encoded as an 8-bit ID and an 8-bit EID.
- */
-
- ipi_data = (delivery_mode << 8) | (vector & 0xff);
- ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
-
- writeq(ipi_data, ipi_addr);
-}
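-
-/*
- * Worked example, not part of the original file: assuming INT delivery
- * mode (value 0), vector 0xf0, physical CPU id 0x0203 (ID 0x02, EID 0x03)
- * and no redirection:
- *   ipi_data = (0 << 8) | 0xf0               = 0x00f0
- *   ipi_addr = ipi_base_addr + (0x0203 << 4) = ipi_base_addr + 0x2030
- * The writeq() of ipi_data to ipi_addr raises the interrupt on that CPU.
- */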
diff --git a/xen/arch/ia64/linux-xen/mca.c b/xen/arch/ia64/linux-xen/mca.c
deleted file mode 100644
index 917398dd50..0000000000
--- a/xen/arch/ia64/linux-xen/mca.c
+++ /dev/null
@@ -1,1963 +0,0 @@
-/*
- * File: mca.c
- * Purpose: Generic MCA handling layer
- *
- * Updated for latest kernel
- * Copyright (C) 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Copyright (C) 2002 Dell Inc.
- * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
- *
- * Copyright (C) 2002 Intel
- * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
- *
- * Copyright (C) 2001 Intel
- * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
- *
- * Copyright (C) 2000 Intel
- * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
- *
- * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
- *
- * 03/04/15 D. Mosberger Added INIT backtrace support.
- * 02/03/25 M. Domsch GUID cleanups
- *
- * 02/01/04 J. Hall Aligned MCA stack to 16 bytes, added platform vs. CPU
- * error flag, set SAL default return values, changed
- * error record structure to linked list, added init call
- * to sal_get_state_info_size().
- *
- * 01/01/03 F. Lewis Added setup of CMCI and CPEI IRQs, logging of corrected
- * platform errors, completed code for logging of
- * corrected & uncorrected machine check errors, and
- * updated for conformance with Nov. 2000 revision of the
- * SAL 3.0 spec.
- * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
- * added min save state dump, added INIT handler.
- *
- * 2003-12-08 Keith Owens <kaos@sgi.com>
- * smp_call_function() must not be called from interrupt context (can
- * deadlock on tasklist_lock). Use keventd to call smp_call_function().
- *
- * 2004-02-01 Keith Owens <kaos@sgi.com>
- * Avoid deadlock when using printk() for MCA and INIT records.
- * Delete all record printing code, moved to salinfo_decode in user space.
- * Mark variables and functions static where possible.
- * Delete dead variables and functions.
- * Reorder to remove the need for forward declarations and to consolidate
- * related code.
- */
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kallsyms.h>
-#include <linux/smp_lock.h>
-#include <linux/bootmem.h>
-#include <linux/acpi.h>
-#include <linux/timer.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/workqueue.h>
-
-#include <asm/delay.h>
-#include <asm/machvec.h>
-#include <asm/meminit.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/sal.h>
-#include <asm/mca.h>
-
-#include <asm/irq.h>
-#include <asm/hw_irq.h>
-
-#ifdef XEN
-#include <xen/symbols.h>
-#include <xen/mm.h>
-#include <xen/console.h>
-#include <xen/event.h>
-#include <xen/softirq.h>
-#include <asm/xenmca.h>
-#include <linux/shutdown.h>
-#endif
-
-#if defined(IA64_MCA_DEBUG_INFO)
-# define IA64_MCA_DEBUG(fmt...) printk(fmt)
-#else
-# define IA64_MCA_DEBUG(fmt...)
-#endif
-
-/* Used by mca_asm.S */
-#ifndef XEN
-ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state;
-#else
-ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state[NR_CPUS];
-DEFINE_PER_CPU(u64, ia64_sal_to_os_handoff_state_addr);
-#endif
-ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state;
-u64 ia64_mca_serialize;
-DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
-DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
-DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
-DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */
-
-unsigned long __per_cpu_mca[NR_CPUS];
-
-/* In mca_asm.S */
-extern void ia64_monarch_init_handler (void);
-extern void ia64_slave_init_handler (void);
-
-static ia64_mc_info_t ia64_mc_info;
-
-#ifdef XEN
-#define jiffies NOW()
-#undef HZ
-#define HZ 1000000000UL
-#endif
-
-#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
-#define MIN_CPE_POLL_INTERVAL (2*60*HZ) /* 2 minutes */
-#define CMC_POLL_INTERVAL (1*60*HZ) /* 1 minute */
-#define CPE_HISTORY_LENGTH 5
-#define CMC_HISTORY_LENGTH 5
-
-#ifndef XEN
-static struct timer_list cpe_poll_timer;
-static struct timer_list cmc_poll_timer;
-#else
-#define mod_timer(timer, expires) set_timer(timer, expires)
-static struct timer cpe_poll_timer;
-static struct timer cmc_poll_timer;
-#endif
-/*
- * This variable tells whether we are currently in polling mode.
- * Start with this in the wrong state so we won't play w/ timers
- * before the system is ready.
- */
-static int cmc_polling_enabled = 1;
-
-/*
- * Clearing this variable prevents CPE polling from getting activated
- * in mca_late_init. Use it if your system doesn't provide a CPEI,
- * but encounters problems retrieving CPE logs. This should only be
- * necessary for debugging.
- */
-static int cpe_poll_enabled = 1;
-
-extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
-
-static int mca_init;
-
-/*
- * IA64_MCA log support
- */
-#define IA64_MAX_LOGS 2 /* Double-buffering for nested MCAs */
-#define IA64_MAX_LOG_TYPES 4 /* MCA, INIT, CMC, CPE */
-
-typedef struct ia64_state_log_s
-{
- spinlock_t isl_lock;
- int isl_index;
- unsigned long isl_count;
- ia64_err_rec_t *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
-} ia64_state_log_t;
-
-static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
-
-#ifndef XEN
-#define IA64_LOG_ALLOCATE(it, size) \
- {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
- (ia64_err_rec_t *)alloc_bootmem(size); \
- ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
- (ia64_err_rec_t *)alloc_bootmem(size);}
-#else
-#define IA64_LOG_ALLOCATE(it, size) \
- do { \
- unsigned int pageorder; \
- struct page_info *page; \
- pageorder = get_order_from_bytes(size); \
- page = alloc_domheap_pages(NULL, pageorder, 0); \
- ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
- page? (ia64_err_rec_t *)page_to_virt(page): NULL; \
- page = alloc_domheap_pages(NULL, pageorder, 0); \
- ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
- page? (ia64_err_rec_t *)page_to_virt(page): NULL; \
- } while(0)
-#endif
-
-#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
-#define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
-#define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
-#define IA64_LOG_NEXT_INDEX(it) ia64_state_log[it].isl_index
-#define IA64_LOG_CURR_INDEX(it) 1 - ia64_state_log[it].isl_index
-#define IA64_LOG_INDEX_INC(it) \
- {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
- ia64_state_log[it].isl_count++;}
-#define IA64_LOG_INDEX_DEC(it) \
- ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
-#define IA64_LOG_NEXT_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
-#define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
-#define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count
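-
-/*
- * Usage sketch, not part of the original file: the macros above implement
- * a two-entry flip buffer per record type. A retrieval path fills the NEXT
- * buffer under IA64_LOG_LOCK and, on success, flips it to become CURR
- * ("type" stands for one of the SAL_INFO_TYPE_* constants):
- */
-#if 0
-	sal_log_record_header_t *log_buffer = IA64_LOG_NEXT_BUFFER(type);
-	u64 total_len = ia64_sal_get_state_info(type, (u64 *)log_buffer);
-	if (total_len)
-		IA64_LOG_INDEX_INC(type);	/* NEXT becomes CURR */
-#endif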
-
-#ifdef XEN
-sal_queue_entry_t sal_entry[NR_CPUS][IA64_MAX_LOG_TYPES];
-struct list_head *sal_queue, sal_log_queues[IA64_MAX_LOG_TYPES];
-sal_log_record_header_t *sal_record;
-DEFINE_SPINLOCK(sal_queue_lock);
-#endif
-
-/*
- * ia64_log_init
- * Reset the OS ia64 log buffer
- * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- * Outputs : None
- */
-static void
-ia64_log_init(int sal_info_type)
-{
- u64 max_size = 0;
-
- IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
- IA64_LOG_LOCK_INIT(sal_info_type);
-
- // SAL will tell us the maximum size of any error record of this type
- max_size = ia64_sal_get_state_info_size(sal_info_type);
- if (!max_size)
- /* alloc_bootmem() doesn't like zero-sized allocations! */
- return;
-
- // set up OS data structures to hold error info
- IA64_LOG_ALLOCATE(sal_info_type, max_size);
- memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
- memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
-
-#ifdef XEN
- if (sal_record == NULL) {
- unsigned int pageorder;
- struct page_info *page;
- pageorder = get_order_from_bytes(max_size);
- page = alloc_domheap_pages(NULL, pageorder, 0);
- BUG_ON(page == NULL);
- sal_record = (sal_log_record_header_t *)page_to_virt(page);
- BUG_ON(sal_record == NULL);
- }
-#endif
-}
-
-#ifndef XEN
-/*
- * ia64_log_get
- *
- * Get the current MCA log from SAL and copy it into the OS log buffer.
- *
- * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- * irq_safe whether you can use printk at this point
- * Outputs : size (total record length)
- * *buffer (ptr to error record)
- *
- */
-static u64
-ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
-{
- sal_log_record_header_t *log_buffer;
- u64 total_len = 0;
- int s;
-
- IA64_LOG_LOCK(sal_info_type);
-
- /* Get the process state information */
- log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
-
- total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
-
- if (total_len) {
- IA64_LOG_INDEX_INC(sal_info_type);
- IA64_LOG_UNLOCK(sal_info_type);
- if (irq_safe) {
- IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
- "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
- }
- *buffer = (u8 *) log_buffer;
- return total_len;
- } else {
- IA64_LOG_UNLOCK(sal_info_type);
- return 0;
- }
-}
-
-/*
- * ia64_mca_log_sal_error_record
- *
- * This function retrieves a specified error record type from SAL
- * and wakes up any processes waiting for error records.
- *
- * Inputs : sal_info_type (Type of error record MCA/CMC/CPE/INIT)
- */
-static void
-ia64_mca_log_sal_error_record(int sal_info_type)
-{
- u8 *buffer;
- sal_log_record_header_t *rh;
- u64 size;
- int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
-#ifdef IA64_MCA_DEBUG_INFO
- static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
-#endif
-
- size = ia64_log_get(sal_info_type, &buffer, irq_safe);
- if (!size)
- return;
-
- salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
-
- if (irq_safe)
- IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
- smp_processor_id(),
- sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
-
- /* Clear logs from corrected errors in case there's no user-level logger */
- rh = (sal_log_record_header_t *)buffer;
- if (rh->severity == sal_log_severity_corrected)
- ia64_sal_clear_state_info(sal_info_type);
-}
-#else /* !XEN */
-/*
- * ia64_log_queue
- *
- * Get the current MCA log from SAL and copy it into the OS log buffer.
- *
- * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- * Outputs : size (total record length)
- * *buffer (ptr to error record)
- *
- */
-static u64
-ia64_log_queue(int sal_info_type, int virq)
-{
- sal_log_record_header_t *log_buffer;
- u64 total_len = 0;
- int s;
- sal_queue_entry_t *e;
- unsigned long flags;
-
- IA64_LOG_LOCK(sal_info_type);
-
- /* Get the process state information */
- log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
-
- total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
-
- if (total_len) {
- int queue_type;
- int cpuid = smp_processor_id();
-
- spin_lock_irqsave(&sal_queue_lock, flags);
-
- if (sal_info_type == SAL_INFO_TYPE_MCA && virq == VIRQ_MCA_CMC)
- queue_type = SAL_INFO_TYPE_CMC;
- else
- queue_type = sal_info_type;
-
- /* Skip if sal_entry is already listed in sal_queue */
- list_for_each_entry(e, &sal_queue[queue_type], list) {
- if (e == &sal_entry[cpuid][queue_type])
- goto found;
- }
- e = &sal_entry[cpuid][queue_type];
- memset(e, 0, sizeof(sal_queue_entry_t));
- e->cpuid = cpuid;
- e->sal_info_type = sal_info_type;
- e->vector = IA64_CMC_VECTOR;
- e->virq = virq;
- e->length = total_len;
-
- list_add_tail(&e->list, &sal_queue[queue_type]);
-
- found:
- spin_unlock_irqrestore(&sal_queue_lock, flags);
-
- IA64_LOG_INDEX_INC(sal_info_type);
- IA64_LOG_UNLOCK(sal_info_type);
- if (sal_info_type != SAL_INFO_TYPE_MCA &&
- sal_info_type != SAL_INFO_TYPE_INIT) {
- IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
- "Record length = %ld\n", __FUNCTION__,
- sal_info_type, total_len);
- }
- return total_len;
- } else {
- IA64_LOG_UNLOCK(sal_info_type);
- return 0;
- }
-}
-#endif /* !XEN */
-
-/*
- * platform dependent error handling
- */
-#ifndef PLATFORM_MCA_HANDLERS
-
-#ifdef CONFIG_ACPI
-
-#ifdef XEN
-/**
- * Copy from linux/kernel/irq/manage.c
- *
- * disable_irq_nosync - disable an irq without waiting
- * @irq: Interrupt to disable
- *
- * Disable the selected interrupt line. Disables and Enables are
- * nested.
- * Unlike disable_irq(), this function does not ensure existing
- * instances of the IRQ handler have completed before returning.
- *
- * This function may be called from IRQ context.
- */
-void disable_irq_nosync(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- if (irq >= NR_IRQS)
- return;
-
- spin_lock_irqsave(&desc->lock, flags);
- if (!desc->arch.depth++) {
- desc->status |= IRQ_DISABLED;
- desc->handler->disable(desc);
- }
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-/**
- * Copy from linux/kernel/irq/manage.c
- *
- * enable_irq - enable handling of an irq
- * @irq: Interrupt to enable
- *
- * Undoes the effect of one call to disable_irq(). If this
- * matches the last disable, processing of interrupts on this
- * IRQ line is re-enabled.
- *
- * This function may be called from IRQ context.
- */
-void enable_irq(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- if (irq >= NR_IRQS)
- return;
-
- spin_lock_irqsave(&desc->lock, flags);
- switch (desc->arch.depth) {
- case 0:
- WARN_ON(1);
- break;
- case 1: {
- unsigned int status = desc->status & ~IRQ_DISABLED;
-
- desc->status = status;
- if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
- desc->status = status | IRQ_REPLAY;
- hw_resend_irq(desc->handler,irq);
- }
- desc->handler->enable(desc);
- /* fall-through */
- }
- default:
- desc->arch.depth--;
- }
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-#endif /* XEN */
-
-int cpe_vector = -1;
-
-static irqreturn_t
-ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
-{
- static unsigned long cpe_history[CPE_HISTORY_LENGTH];
- static int index;
- static DEFINE_SPINLOCK(cpe_history_lock);
-
- IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
- __FUNCTION__, cpe_irq, smp_processor_id());
-
- /* SAL spec states this should run w/ interrupts enabled */
- local_irq_enable();
-
-#ifndef XEN
- /* Get the CPE error record and log it */
- ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
-#else
- ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
-	/* CPE errors are not reported to dom0; the following code is
-	   reserved for a future implementation */
-/* send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CPE); */
-#endif
-
- spin_lock(&cpe_history_lock);
- if (!cpe_poll_enabled && cpe_vector >= 0) {
-
- int i, count = 1; /* we know 1 happened now */
- unsigned long now = jiffies;
-
- for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
- if (now - cpe_history[i] <= HZ)
- count++;
- }
-
- IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
- if (count >= CPE_HISTORY_LENGTH) {
-
- cpe_poll_enabled = 1;
- spin_unlock(&cpe_history_lock);
- disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
-
- /*
- * Corrected errors will still be corrected, but
- * make sure there's a log somewhere that indicates
- * something is generating more than we can handle.
- */
- printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
-
- mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
-
- /* lock already released, get out now */
- return IRQ_HANDLED;
- } else {
- cpe_history[index++] = now;
- if (index == CPE_HISTORY_LENGTH)
- index = 0;
- }
- }
- spin_unlock(&cpe_history_lock);
- return IRQ_HANDLED;
-}
-
-#endif /* CONFIG_ACPI */
-
-static void
-show_min_state (pal_min_state_area_t *minstate)
-{
- u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
- u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
-
- printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
- printk("pr\t\t%016lx\n", minstate->pmsa_pr);
- printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
- printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
- printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
- printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
- printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
- printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
- printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
- printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
- printk("b1\t\t%016lx ", minstate->pmsa_br1);
- print_symbol("%s\n", minstate->pmsa_br1);
-
- printk("\nstatic registers r0-r15:\n");
- printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
- 0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
- printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_gr[3], minstate->pmsa_gr[4],
- minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
- printk(" r8-11 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_gr[7], minstate->pmsa_gr[8],
- minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
- printk("r12-15 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_gr[11], minstate->pmsa_gr[12],
- minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
-
- printk("\nbank 0:\n");
- printk("r16-19 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
- minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
- printk("r20-23 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
- minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
- printk("r24-27 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
- minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
- printk("r28-31 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
- minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
-
- printk("\nbank 1:\n");
- printk("r16-19 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
- minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
- printk("r20-23 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
- minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
- printk("r24-27 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
- minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
- printk("r28-31 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
- minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
-}
-
-static void
-fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
-{
- u64 *dst_banked, *src_banked, bit, shift, nat_bits;
- int i;
-
- /*
- * First, update the pt-regs and switch-stack structures with the contents stored
- * in the min-state area:
- */
- if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
- pt->cr_ipsr = ms->pmsa_xpsr;
- pt->cr_iip = ms->pmsa_xip;
- pt->cr_ifs = ms->pmsa_xfs;
- } else {
- pt->cr_ipsr = ms->pmsa_ipsr;
- pt->cr_iip = ms->pmsa_iip;
- pt->cr_ifs = ms->pmsa_ifs;
- }
- pt->ar_rsc = ms->pmsa_rsc;
- pt->pr = ms->pmsa_pr;
- pt->r1 = ms->pmsa_gr[0];
- pt->r2 = ms->pmsa_gr[1];
- pt->r3 = ms->pmsa_gr[2];
- sw->r4 = ms->pmsa_gr[3];
- sw->r5 = ms->pmsa_gr[4];
- sw->r6 = ms->pmsa_gr[5];
- sw->r7 = ms->pmsa_gr[6];
- pt->r8 = ms->pmsa_gr[7];
- pt->r9 = ms->pmsa_gr[8];
- pt->r10 = ms->pmsa_gr[9];
- pt->r11 = ms->pmsa_gr[10];
- pt->r12 = ms->pmsa_gr[11];
- pt->r13 = ms->pmsa_gr[12];
- pt->r14 = ms->pmsa_gr[13];
- pt->r15 = ms->pmsa_gr[14];
- dst_banked = &pt->r16; /* r16-r31 are contiguous in struct pt_regs */
- src_banked = ms->pmsa_bank1_gr;
- for (i = 0; i < 16; ++i)
- dst_banked[i] = src_banked[i];
- pt->b0 = ms->pmsa_br0;
- sw->b1 = ms->pmsa_br1;
-
- /* construct the NaT bits for the pt-regs structure: */
-# define PUT_NAT_BIT(dst, addr) \
- do { \
- bit = nat_bits & 1; nat_bits >>= 1; \
- shift = ((unsigned long) addr >> 3) & 0x3f; \
- dst = ((dst) & ~(1UL << shift)) | (bit << shift); \
- } while (0)
-
- /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
- shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
- nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
-
- PUT_NAT_BIT(sw->caller_unat, &pt->r1);
- PUT_NAT_BIT(sw->caller_unat, &pt->r2);
- PUT_NAT_BIT(sw->caller_unat, &pt->r3);
- PUT_NAT_BIT(sw->ar_unat, &sw->r4);
- PUT_NAT_BIT(sw->ar_unat, &sw->r5);
- PUT_NAT_BIT(sw->ar_unat, &sw->r6);
- PUT_NAT_BIT(sw->ar_unat, &sw->r7);
- PUT_NAT_BIT(sw->caller_unat, &pt->r8); PUT_NAT_BIT(sw->caller_unat, &pt->r9);
- PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11);
- PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13);
- PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15);
- nat_bits >>= 16; /* skip over bank0 NaT bits */
- PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17);
- PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19);
- PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21);
- PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23);
- PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25);
- PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27);
- PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29);
- PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31);
-}
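-
-/*
- * Illustrative note, not part of the original file: PUT_NAT_BIT uses
- * UNAT-style indexing, where the register saved at address A owns bit
- * ((A >> 3) & 0x3f) of the collection word. The rotation above therefore
- * shifts pmsa_nat_bits right so that bit 0 corresponds to pmsa_gr[0],
- * after which the bits are handed out one at a time (nat_bits & 1,
- * nat_bits >>= 1) in the same order the registers are visited.
- */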
-
-#ifdef XEN
-static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
-static atomic_t num_stopped_cpus = ATOMIC_INIT(0);
-extern void show_stack (struct task_struct *, unsigned long *);
-
-#define CPU_FLUSH_RETRY_MAX 5
-static void
-init_cache_flush (void)
-{
- unsigned long flags;
- int i;
- s64 rval = 0;
- u64 vector, progress = 0;
-
- for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
- local_irq_save(flags);
- rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
- 0, &progress, &vector);
- local_irq_restore(flags);
- if (rval == 0){
- printk("\nPAL cache flush success\n");
- return;
- }
- }
-	printk("\nPAL cache flush failed. status=%ld\n", rval);
-}
-
-static inline void
-save_ksp (struct unw_frame_info *info)
-{
- current->arch._thread.ksp = (__u64)(info->sw) - 16;
- wmb();
- init_cache_flush();
-}
-
-static void
-freeze_cpu_osinit (struct unw_frame_info *info, void *arg)
-{
- save_ksp(info);
- atomic_inc(&num_stopped_cpus);
- printk("%s: CPU%d init handler done\n",
- __FUNCTION__, smp_processor_id());
- for (;;)
- local_irq_disable();
-}
-
-/* FIXME */
-static void
-try_crashdump(struct unw_frame_info *info, void *arg)
-{
- save_ksp(info);
- printk("\nINIT dump complete. Please reboot now.\n");
- for (;;)
- local_irq_disable();
-}
-#endif /* XEN */
-
-static void
-init_handler_platform (pal_min_state_area_t *ms,
- struct pt_regs *pt, struct switch_stack *sw)
-{
- struct unw_frame_info info;
-
- /* if a kernel debugger is available call it here else just dump the registers */
-
- /*
-	 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000), INIT can be
- * generated via the BMC's command-line interface, but since the console is on the
- * same serial line, the user will need some time to switch out of the BMC before
- * the dump begins.
- */
- printk("Delaying for 5 seconds...\n");
- udelay(5*1000000);
-#ifdef XEN
- fetch_min_state(ms, pt, sw);
- spin_lock(&show_stack_lock);
-#endif
- show_min_state(ms);
-
-#ifdef XEN
- printk("Backtrace of current vcpu (vcpu_id %d of domid %d)\n",
- current->vcpu_id, current->domain->domain_id);
-#else
- printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
- fetch_min_state(ms, pt, sw);
-#endif
- unw_init_from_interruption(&info, current, pt, sw);
- ia64_do_show_stack(&info, NULL);
-#ifdef XEN
- spin_unlock(&show_stack_lock);
-
- if (spin_trylock(&init_dump_lock)) {
- struct domain *d;
- struct vcpu *v;
-#ifdef CONFIG_SMP
- int other_cpus = num_online_cpus() - 1;
- int wait = 1000 * other_cpus;
-
- while ((atomic_read(&num_stopped_cpus) != other_cpus) && wait--)
- udelay(1000);
- if (other_cpus && wait < 0)
- printk("timeout %d\n", atomic_read(&num_stopped_cpus));
-#endif
- if (opt_noreboot) {
- /* this route is for dump routine */
- unw_init_running(try_crashdump, pt);
- } else {
- rcu_read_lock(&domlist_read_lock);
- for_each_domain(d) {
- for_each_vcpu(d, v) {
- printk("Backtrace of current vcpu "
- "(vcpu_id %d of domid %d)\n",
- v->vcpu_id, d->domain_id);
- show_stack(v, NULL);
- }
- }
- rcu_read_unlock(&domlist_read_lock);
- }
- }
- unw_init_running(freeze_cpu_osinit, NULL);
-#else /* XEN */
-#ifdef CONFIG_SMP
- /* read_trylock() would be handy... */
- if (!tasklist_lock.write_lock)
- read_lock(&tasklist_lock);
-#endif
- {
- struct task_struct *g, *t;
- do_each_thread (g, t) {
- if (t == current)
- continue;
-
- printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
- show_stack(t, NULL);
- } while_each_thread (g, t);
- }
-#ifdef CONFIG_SMP
- if (!tasklist_lock.write_lock)
- read_unlock(&tasklist_lock);
-#endif
-
- printk("\nINIT dump complete. Please reboot now.\n");
-#endif /* XEN */
- while (1); /* hang city if no debugger */
-}
-
-#ifdef CONFIG_ACPI
-/*
- * ia64_mca_register_cpev
- *
- * Register the corrected platform error vector with SAL.
- *
- * Inputs
- * cpev Corrected Platform Error Vector number
- *
- * Outputs
- * None
- */
-static void
-ia64_mca_register_cpev (int cpev)
-{
- /* Register the CPE interrupt vector with SAL */
- struct ia64_sal_retval isrv;
-
- isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
- if (isrv.status) {
- printk(KERN_ERR "Failed to register Corrected Platform "
- "Error interrupt vector with SAL (status %ld)\n", isrv.status);
- return;
- }
-
- IA64_MCA_DEBUG("%s: corrected platform error "
- "vector %#x registered\n", __FUNCTION__, cpev);
-}
-#endif /* CONFIG_ACPI */
-
-#endif /* PLATFORM_MCA_HANDLERS */
-
-/*
- * ia64_mca_cmc_vector_setup
- *
- * Set up the corrected machine check vector register in the processor.
- * (The interrupt is masked on boot; ia64_mca_late_init() unmasks it.)
- * This function is invoked on a per-processor basis.
- *
- * Inputs
- * None
- *
- * Outputs
- * None
- */
-void
-ia64_mca_cmc_vector_setup (void)
-{
- cmcv_reg_t cmcv;
-
- cmcv.cmcv_regval = 0;
- cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */
- cmcv.cmcv_vector = IA64_CMC_VECTOR;
- ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
-
- IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x registered.\n",
- __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
-
- IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
- __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
-}
-
-/*
- * ia64_mca_cmc_vector_disable
- *
- * Mask the corrected machine check vector register in the processor.
- * This function is invoked on a per-processor basis.
- *
- * Inputs
- * dummy(unused)
- *
- * Outputs
- * None
- */
-static void
-ia64_mca_cmc_vector_disable (void *dummy)
-{
- cmcv_reg_t cmcv;
-
- cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
-
- cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
- ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
-
- IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x disabled.\n",
- __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
-}
-
-/*
- * ia64_mca_cmc_vector_enable
- *
- * Unmask the corrected machine check vector register in the processor.
- * This function is invoked on a per-processor basis.
- *
- * Inputs
- * dummy(unused)
- *
- * Outputs
- * None
- */
-static void
-ia64_mca_cmc_vector_enable (void *dummy)
-{
- cmcv_reg_t cmcv;
-
- cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
-
- cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
- ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
-
- IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x enabled.\n",
- __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
-}
-
-#ifndef XEN
-/*
- * ia64_mca_cmc_vector_disable_keventd
- *
- * Called via keventd (smp_call_function() is not safe in interrupt context) to
- * disable the cmc interrupt vector.
- */
-static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
-{
- on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
-}
-
-/*
- * ia64_mca_cmc_vector_enable_keventd
- *
- * Called via keventd (smp_call_function() is not safe in interrupt context) to
- * enable the cmc interrupt vector.
- */
-static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
-{
- on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
-}
-#endif /* !XEN */
-
-/*
- * ia64_mca_wakeup_ipi_wait
- *
- * Wait for the inter-cpu interrupt to be sent by the
- * monarch processor once it is done with handling the
- * MCA.
- *
- * Inputs : None
- * Outputs : None
- */
-static void
-ia64_mca_wakeup_ipi_wait(void)
-{
- int irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
- int irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
- u64 irr = 0;
-
- do {
- switch(irr_num) {
- case 0:
- irr = ia64_getreg(_IA64_REG_CR_IRR0);
- break;
- case 1:
- irr = ia64_getreg(_IA64_REG_CR_IRR1);
- break;
- case 2:
- irr = ia64_getreg(_IA64_REG_CR_IRR2);
- break;
- case 3:
- irr = ia64_getreg(_IA64_REG_CR_IRR3);
- break;
- }
- cpu_relax();
- } while (!(irr & (1UL << irr_bit))) ;
-}
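
A note on the IRR arithmetic above: the four cr.irr control registers together form a 256-bit pending-interrupt bitmap, so bits [7:6] of the vector select the register and bits [5:0] select the bit within it. A minimal, host-runnable C sketch of just that arithmetic (the vector value is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int vector = 0x62;           /* illustrative wakeup vector */
        unsigned int irr_num = vector >> 6;   /* which of IRR0..IRR3 */
        unsigned int irr_bit = vector & 0x3f; /* bit position within it */
        uint64_t irr = (uint64_t)1 << irr_bit;/* pretend the IPI just arrived */

        printf("vector %#x -> IRR%u bit %u, pending=%d\n",
               vector, irr_num, irr_bit,
               !!(irr & ((uint64_t)1 << irr_bit)));
        return 0;
    }
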
-
-/*
- * ia64_mca_wakeup
- *
- * Send an inter-cpu interrupt to wake-up a particular cpu
- * and mark that cpu to be out of rendez.
- *
- * Inputs : cpuid
- * Outputs : None
- */
-static void
-ia64_mca_wakeup(int cpu)
-{
- platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
- ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
-
-}
-
-/*
- * ia64_mca_wakeup_all
- *
- * Wake up all the cpus which have rendezvoused previously.
- *
- * Inputs : None
- * Outputs : None
- */
-static void
-ia64_mca_wakeup_all(void)
-{
- int cpu;
-
- /* Clear the Rendez checkin flag for all cpus */
- for(cpu = 0; cpu < NR_CPUS; cpu++) {
- if (!cpu_online(cpu))
- continue;
- if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
- ia64_mca_wakeup(cpu);
- }
-
-}
-
-/*
- * ia64_mca_rendez_interrupt_handler
- *
- * This is the handler used to put slave processors into a spinloop
- * while the monarch processor does the MCA handling, and later to
- * wake each slave up once the monarch is done.
- *
- * Inputs : None
- * Outputs : None
- */
-static irqreturn_t
-ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
-{
- unsigned long flags;
- int cpu = smp_processor_id();
-
- /* Mask all interrupts */
- local_irq_save(flags);
-
- ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
- /* Register with the SAL monarch that the slave has
- * reached SAL
- */
- ia64_sal_mc_rendez();
-
- /* Wait for the wakeup IPI from the monarch
- * This waiting is done by polling on the wakeup-interrupt
- * vector bit in the processor's IRRs
- */
- ia64_mca_wakeup_ipi_wait();
-
- /* Enable all interrupts */
- local_irq_restore(flags);
- return IRQ_HANDLED;
-}
-
-/*
- * ia64_mca_wakeup_int_handler
- *
- * The interrupt handler for processing the inter-cpu interrupt to the
- * slave cpu which was spinning in the rendez loop.
- * Since this spinning is done by turning off the interrupts and
- * polling on the wakeup-interrupt bit in the IRR, there is
- * nothing useful to be done in the handler.
- *
- * Inputs : wakeup_irq (Wakeup-interrupt bit)
- * arg (Interrupt handler specific argument)
- * ptregs (Exception frame at the time of the interrupt)
- * Outputs : None
- *
- */
-static irqreturn_t
-ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
-{
- return IRQ_HANDLED;
-}
-
-/*
- * ia64_return_to_sal_check
- *
- * This is the function called before going back from the OS_MCA handler
- * to the OS_MCA dispatch code, which finally hands control back
- * to the SAL.
- * The main purpose of this routine is to set up the OS_MCA to SAL
- * return state, which the OS_MCA dispatch code uses just before
- * going back to SAL.
- *
- * Inputs : None
- * Outputs : None
- */
-
-static void
-ia64_return_to_sal_check(int recover)
-{
-#ifdef XEN
- int cpu = smp_processor_id();
-#endif
-
- /* Copy over the relevant fields from the sal_to_os_mca_handoff
- * state so that they can be used at the time of os_mca_to_sal_handoff
- */
-#ifdef XEN
- ia64_os_to_sal_handoff_state.imots_sal_gp =
- ia64_sal_to_os_handoff_state[cpu].imsto_sal_gp;
-
- ia64_os_to_sal_handoff_state.imots_sal_check_ra =
- ia64_sal_to_os_handoff_state[cpu].imsto_sal_check_ra;
-#else
- ia64_os_to_sal_handoff_state.imots_sal_gp =
- ia64_sal_to_os_handoff_state.imsto_sal_gp;
-
- ia64_os_to_sal_handoff_state.imots_sal_check_ra =
- ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
-#endif
-
- if (recover)
- ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
- else
- ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
-
- /* Default = tell SAL to return to same context */
- ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
-
-#ifdef XEN
- ia64_os_to_sal_handoff_state.imots_new_min_state =
- (u64 *)ia64_sal_to_os_handoff_state[cpu].pal_min_state;
-#else
- ia64_os_to_sal_handoff_state.imots_new_min_state =
- (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
-#endif
-
-}
-
-/* Function pointer for extra MCA recovery */
-int (*ia64_mca_ucmc_extension)
- (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
- = NULL;
-
-int
-ia64_reg_MCA_extension(void *fn)
-{
- if (ia64_mca_ucmc_extension)
- return 1;
-
- ia64_mca_ucmc_extension = fn;
- return 0;
-}
-
-void
-ia64_unreg_MCA_extension(void)
-{
- if (ia64_mca_ucmc_extension)
- ia64_mca_ucmc_extension = NULL;
-}
-
-EXPORT_SYMBOL(ia64_reg_MCA_extension);
-EXPORT_SYMBOL(ia64_unreg_MCA_extension);
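
The registration API above is first-come-first-served: ia64_reg_MCA_extension() returns 1 if a hook is already installed, and a non-zero return from the installed hook is folded into the recover decision in ia64_mca_ucmc_handler() below. A hedged sketch of a caller -- my_recovery_check and the setup/teardown wrappers are hypothetical, only the two registration calls and the hook signature come from this file:

    /* Hypothetical recovery hook; the signature follows the
     * ia64_mca_ucmc_extension pointer declared above. */
    static int my_recovery_check(void *record,
                                 ia64_mca_sal_to_os_state_t *sal_to_os,
                                 ia64_mca_os_to_sal_state_t *os_to_sal)
    {
        /* inspect the SAL error record here... */
        return 0;            /* 0: not recovered; non-zero: recovered */
    }

    static void my_recovery_setup(void)
    {
        if (ia64_reg_MCA_extension(my_recovery_check))
            printk(KERN_ERR "MCA extension slot already taken\n");
    }

    static void my_recovery_teardown(void)
    {
        ia64_unreg_MCA_extension();
    }
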
-
-/*
- * ia64_mca_ucmc_handler
- *
- * This is the uncorrectable machine check handler, called from the
- * OS_MCA dispatch code, which is in turn called from SAL_CHECK().
- * This is the place where the core of OS MCA handling is done.
- * Right now the logs are extracted and displayed in a well-defined
- * format. This handler code is supposed to run only on the
- * monarch processor. Once the monarch is done with MCA handling,
- * further MCA logging is re-enabled by clearing the logs.
- * The monarch also has the duty of sending wakeup-IPIs to pull the
- * slave processors out of the rendezvous spinloop.
- *
- * Inputs : None
- * Outputs : None
- */
-void
-ia64_mca_ucmc_handler(void)
-{
-#ifdef XEN
- int cpu = smp_processor_id();
- pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
- &ia64_sal_to_os_handoff_state[cpu].proc_state_param;
-#else
- pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
- &ia64_sal_to_os_handoff_state.proc_state_param;
-#endif
- int recover;
-
-#ifndef XEN
- /* Get the MCA error record and log it */
- ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
-#else
- ia64_log_queue(SAL_INFO_TYPE_MCA, VIRQ_MCA_CMC);
- send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
-#endif
-
- /* Recover only if the TLB error is the only error in this SAL error record */
- recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
- /* other error recovery */
-#ifndef XEN
- || (ia64_mca_ucmc_extension
- && ia64_mca_ucmc_extension(
- IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
- &ia64_sal_to_os_handoff_state,
- &ia64_os_to_sal_handoff_state));
-#else
- ;
-#endif
-
-#ifndef XEN
- if (recover) {
- sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
- rh->severity = sal_log_severity_corrected;
- ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
- }
-#endif
- /*
- * Wakeup all the processors which are spinning in the rendezvous
- * loop.
- */
- ia64_mca_wakeup_all();
-
- /* Return to SAL */
- ia64_return_to_sal_check(recover);
-}
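
The recovery decision above keys off the processor state parameter: the event counts as corrected only when the TLB check bit (tc) is set and none of the cache/bus/register-file/micro-architectural check bits (cc, bc, rc, uc) are. A host-runnable sketch of that predicate; the struct is a stand-in that mirrors only the psp bits used above, not the full pal_processor_state_info_t:

    #include <stdio.h>

    struct psp_bits {
        unsigned tc : 1; /* TLB check */
        unsigned cc : 1; /* cache check */
        unsigned bc : 1; /* bus check */
        unsigned rc : 1; /* register file check */
        unsigned uc : 1; /* micro-architectural check */
    };

    /* Mirrors the recover condition in ia64_mca_ucmc_handler()
     * (Xen path, i.e. without the ia64_mca_ucmc_extension hook). */
    static int mca_recoverable(const struct psp_bits *psp)
    {
        return psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc);
    }

    int main(void)
    {
        struct psp_bits tlb_only = { .tc = 1 };
        struct psp_bits tlb_plus_bus = { .tc = 1, .bc = 1 };

        printf("tlb only: %d\n", mca_recoverable(&tlb_only));     /* 1 */
        printf("tlb+bus:  %d\n", mca_recoverable(&tlb_plus_bus)); /* 0 */
        return 0;
    }
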
-
-#ifndef XEN
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
-#endif
-
-/*
- * ia64_mca_cmc_int_handler
- *
- * This is the corrected machine check interrupt handler.
- * Right now the logs are extracted and displayed in a well-defined
- * format.
- *
- * Inputs
- * interrupt number
- * client data arg ptr
- * saved registers ptr
- *
- * Outputs
- * None
- */
-static irqreturn_t
-ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
-{
- static unsigned long cmc_history[CMC_HISTORY_LENGTH];
- static int index;
- static DEFINE_SPINLOCK(cmc_history_lock);
-
- IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
- __FUNCTION__, cmc_irq, smp_processor_id());
-
- /* SAL spec states this should run w/ interrupts enabled */
- local_irq_enable();
-
-#ifndef XEN
- /* Get the CMC error record and log it */
- ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
-#else
- ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
- send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
-#endif
-
- spin_lock(&cmc_history_lock);
- if (!cmc_polling_enabled) {
- int i, count = 1; /* we know 1 happened now */
- unsigned long now = jiffies;
-
- for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
- if (now - cmc_history[i] <= HZ)
- count++;
- }
-
- IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
- if (count >= CMC_HISTORY_LENGTH) {
-
- cmc_polling_enabled = 1;
- spin_unlock(&cmc_history_lock);
-#ifndef XEN /* XXX FIXME */
- schedule_work(&cmc_disable_work);
-#else
- cpumask_raise_softirq(&cpu_online_map,
- CMC_DISABLE_SOFTIRQ);
-#endif
-
- /*
- * Corrected errors will still be corrected, but
- * make sure there's a log somewhere that indicates
- * something is generating more than we can handle.
- */
- printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
-
- mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
-
- /* lock already released, get out now */
- return IRQ_HANDLED;
- } else {
- cmc_history[index++] = now;
- if (index == CMC_HISTORY_LENGTH)
- index = 0;
- }
- }
- spin_unlock(&cmc_history_lock);
- return IRQ_HANDLED;
-}
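
The storm detector above keeps the timestamps of the last CMC_HISTORY_LENGTH interrupts in a ring; if the current interrupt plus the history add up to CMC_HISTORY_LENGTH hits within one second (HZ ticks), the handler switches to polling. A host-runnable sketch of that logic -- the constants are illustrative stand-ins, not the tree's real values:

    #include <stdio.h>

    #define CMC_HISTORY_LENGTH 8
    #define HZ 100

    static unsigned long cmc_history[CMC_HISTORY_LENGTH];
    static int index;

    /* Returns 1 when the caller should mask the CMC vector and poll. */
    static int cmc_storm(unsigned long now)
    {
        int i, count = 1; /* the interrupt we are handling right now */

        for (i = 0; i < CMC_HISTORY_LENGTH; i++)
            if (now - cmc_history[i] <= HZ)
                count++;
        if (count >= CMC_HISTORY_LENGTH)
            return 1;
        cmc_history[index++] = now;   /* remember this interrupt */
        if (index == CMC_HISTORY_LENGTH)
            index = 0;
        return 0;
    }

    int main(void)
    {
        unsigned long t;
        for (t = 1000; t < 1010; t++)       /* 10 CMCs in 10 ticks */
            if (cmc_storm(t)) {
                printf("storm detected at tick %lu\n", t);
                return 0;
            }
        printf("no storm\n");
        return 0;
    }
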
-
-/*
- * ia64_mca_cmc_int_caller
- *
- * Triggered by sw interrupt from CMC polling routine. Calls
- * real interrupt handler and either triggers a sw interrupt
- * on the next cpu or does cleanup at the end.
- *
- * Inputs
- * interrupt number
- * client data arg ptr
- * saved registers ptr
- * Outputs
- * handled
- */
-static irqreturn_t
-ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
-{
- static int start_count = -1;
- unsigned int cpuid;
-
- cpuid = smp_processor_id();
-
- /* If first cpu, update count */
- if (start_count == -1)
- start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
-
-#ifndef XEN
- ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
-#else
- IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
- __FUNCTION__, cmc_irq, smp_processor_id());
- ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
-#endif
-
- for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
-
- if (cpuid < NR_CPUS) {
- platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
- } else {
- /* If no log record, switch out of polling mode */
- if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
-
- printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
-#ifndef XEN /* XXX FIXME */
- schedule_work(&cmc_enable_work);
-#else
- cpumask_raise_softirq(&cpu_online_map,
- CMC_ENABLE_SOFTIRQ);
-#endif
- cmc_polling_enabled = 0;
-
- } else {
-
- mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
- }
-
- start_count = -1;
- }
- return IRQ_HANDLED;
-}
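
The caller above implements a daisy chain: each CPU drains its own CMC log, then forwards the software interrupt to the next online CPU, and the last CPU in the chain decides whether to stay in polling mode. A host-runnable sketch of the cascade; cpu_online[] and NR_CPUS are illustrative stand-ins, and the recursive call stands in for platform_send_ipi():

    #include <stdio.h>

    #define NR_CPUS 8
    static const int cpu_online[NR_CPUS] = { 1, 1, 0, 1, 0, 0, 1, 0 };

    static void cmc_poll_step(unsigned int cpuid)
    {
        printf("CPU%u: drain local CMC log\n", cpuid);

        /* find the next online CPU, as in the for(;;) above */
        for (++cpuid; cpuid < NR_CPUS && !cpu_online[cpuid]; cpuid++)
            ;
        if (cpuid < NR_CPUS)
            cmc_poll_step(cpuid);     /* stands in for the IPI */
        else
            printf("last CPU: re-arm timer or re-enable the vector\n");
    }

    int main(void)
    {
        cmc_poll_step(0);  /* the poll timer fires on the first online CPU */
        return 0;
    }
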
-
-/*
- * ia64_mca_cmc_poll
- *
- * Poll for Corrected Machine Checks (CMCs)
- *
- * Inputs : dummy(unused)
- * Outputs : None
- *
- */
-static void
-#ifndef XEN
-ia64_mca_cmc_poll (unsigned long dummy)
-#else
-ia64_mca_cmc_poll (void *dummy)
-#endif
-{
- /* Trigger a CMC interrupt cascade */
- platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
-}
-
-/*
- * ia64_mca_cpe_int_caller
- *
- * Triggered by sw interrupt from CPE polling routine. Calls
- * real interrupt handler and either triggers a sw interrupt
- * on the next cpu or does cleanup at the end.
- *
- * Inputs
- * interrupt number
- * client data arg ptr
- * saved registers ptr
- * Outputs
- * handled
- */
-#ifdef CONFIG_ACPI
-
-static irqreturn_t
-ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
-{
- static int start_count = -1;
-#ifdef XEN
- static unsigned long poll_time = MIN_CPE_POLL_INTERVAL;
-#else
- static int poll_time = MIN_CPE_POLL_INTERVAL;
-#endif
- unsigned int cpuid;
-
- cpuid = smp_processor_id();
-
- /* If first cpu, update count */
- if (start_count == -1)
- start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
-
-#ifndef XEN
- ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
-#else
- IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
- __FUNCTION__, cpe_irq, smp_processor_id());
- ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
-#endif
-
- for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
-
- if (cpuid < NR_CPUS) {
- platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
- } else {
- /*
- * If a log was recorded, increase our polling frequency,
- * otherwise, back off or return to interrupt mode.
- */
- if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
- poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
- } else if (cpe_vector < 0) {
- poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
- } else {
- poll_time = MIN_CPE_POLL_INTERVAL;
-
- printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
- enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
- cpe_poll_enabled = 0;
- }
-
- if (cpe_poll_enabled)
- mod_timer(&cpe_poll_timer, jiffies + poll_time);
- start_count = -1;
- }
- return IRQ_HANDLED;
-}
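
The CPE path adapts its poll interval: halve it (poll faster) whenever a sweep turned up a new record, double it (back off) on a quiet sweep, clamped to [MIN, MAX]. A host-runnable sketch of that arithmetic; the interval constants are illustrative, not this tree's values:

    #include <stdio.h>

    #define MIN_CPE_POLL_INTERVAL (1 * 60 * 100)   /* 1 min of ticks, HZ=100 */
    #define MAX_CPE_POLL_INTERVAL (15 * 60 * 100)  /* 15 min */

    static unsigned long next_poll(unsigned long poll_time, int new_records)
    {
        if (new_records)    /* errors arriving: tighten the interval */
            return poll_time / 2 > MIN_CPE_POLL_INTERVAL
                 ? poll_time / 2 : MIN_CPE_POLL_INTERVAL;
        /* quiet: exponential backoff */
        return poll_time * 2 < MAX_CPE_POLL_INTERVAL
             ? poll_time * 2 : MAX_CPE_POLL_INTERVAL;
    }

    int main(void)
    {
        unsigned long t = MIN_CPE_POLL_INTERVAL;
        t = next_poll(t, 0);  /* quiet sweep: back off to 2 min */
        t = next_poll(t, 0);  /* still quiet: 4 min */
        t = next_poll(t, 1);  /* error seen: back down to 2 min */
        printf("next poll in %lu ticks\n", t);
        return 0;
    }
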
-
-/*
- * ia64_mca_cpe_poll
- *
- * Poll for Corrected Platform Errors (CPEs), trigger interrupt
- * on first cpu, from there it will trickle through all the cpus.
- *
- * Inputs : dummy(unused)
- * Outputs : None
- *
- */
-static void
-#ifndef XEN
-ia64_mca_cpe_poll (unsigned long dummy)
-#else
-ia64_mca_cpe_poll (void *dummy)
-#endif
-{
- /* Trigger a CPE interrupt cascade */
- platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
-}
-
-#endif /* CONFIG_ACPI */
-
-/*
- * C portion of the OS INIT handler
- *
- * Called from ia64_monarch_init_handler
- *
- * Inputs: pointers to the pt_regs and switch_stack where processor
- * info was saved.
- *
- * Does not return: init_handler_platform() dumps state and then
- * either reboots or spins forever.
- *
- */
-void
-ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
-{
- pal_min_state_area_t *ms;
-
-#ifndef XEN
- oops_in_progress = 1; /* avoid deadlock in printk, but it makes recovery dodgy */
- console_loglevel = 15; /* make sure printks make it to console */
-
- printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
- ia64_sal_to_os_handoff_state.proc_state_param);
-
- /*
- * Address of minstate area provided by PAL is physical,
- * uncacheable (bit 63 set). Convert to Linux virtual
- * address in region 6.
- */
- ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
-#else
- int cpu = smp_processor_id();
-
- console_start_sync();
- printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
- ia64_sal_to_os_handoff_state[cpu].proc_state_param);
-
- /* Xen virtual address in region 7. */
- ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state[cpu].pal_min_state));
-#endif
-
- init_handler_platform(ms, pt, sw); /* call platform specific routines */
-}
-
-static int __init
-ia64_mca_disable_cpe_polling(char *str)
-{
- cpe_poll_enabled = 0;
- return 1;
-}
-
-__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
-
-static struct irqaction __read_mostly cmci_irqaction = {
- .handler = ia64_mca_cmc_int_handler,
-#ifndef XEN
- .flags = SA_INTERRUPT,
-#endif
- .name = "cmc_hndlr"
-};
-
-static struct irqaction __read_mostly cmcp_irqaction = {
- .handler = ia64_mca_cmc_int_caller,
-#ifndef XEN
- .flags = SA_INTERRUPT,
-#endif
- .name = "cmc_poll"
-};
-
-static struct irqaction __read_mostly mca_rdzv_irqaction = {
- .handler = ia64_mca_rendez_int_handler,
-#ifndef XEN
- .flags = SA_INTERRUPT,
-#endif
- .name = "mca_rdzv"
-};
-
-static struct irqaction __read_mostly mca_wkup_irqaction = {
- .handler = ia64_mca_wakeup_int_handler,
-#ifndef XEN
- .flags = SA_INTERRUPT,
-#endif
- .name = "mca_wkup"
-};
-
-#ifdef CONFIG_ACPI
-static struct irqaction __read_mostly mca_cpe_irqaction = {
- .handler = ia64_mca_cpe_int_handler,
-#ifndef XEN
- .flags = SA_INTERRUPT,
-#endif
- .name = "cpe_hndlr"
-};
-
-static struct irqaction __read_mostly mca_cpep_irqaction = {
- .handler = ia64_mca_cpe_int_caller,
-#ifndef XEN
- .flags = SA_INTERRUPT,
-#endif
- .name = "cpe_poll"
-};
-#endif /* CONFIG_ACPI */
-
-/* Do per-CPU MCA-related initialization. */
-
-void __devinit
-ia64_mca_cpu_init(void *cpu_data)
-{
- void *pal_vaddr;
-
- if (smp_processor_id() == 0) {
- void *mca_data;
- int cpu;
-
-#ifdef XEN
- unsigned int pageorder;
- pageorder = get_order_from_bytes(sizeof(struct ia64_mca_cpu));
-#else
- mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
- * NR_CPUS);
-#endif
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
-#ifdef XEN
- struct page_info *page;
- page = alloc_domheap_pages(NULL, pageorder, 0);
- mca_data = page? page_to_virt(page): NULL;
- __per_cpu_mca[cpu] = __pa(mca_data);
- IA64_MCA_DEBUG("%s: __per_cpu_mca[%d]=%lx"
- "(mca_data[%d]=%lx)\n",
- __FUNCTION__, cpu, __per_cpu_mca[cpu],
- cpu, (u64)mca_data);
-#else
- __per_cpu_mca[cpu] = __pa(mca_data);
- mca_data += sizeof(struct ia64_mca_cpu);
-#endif
- }
- }
-#ifdef XEN
- else if (sal_queue) {
- int i;
- for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
- ia64_log_queue(i, 0);
- }
-#endif
-
- /*
- * The MCA info structure was allocated earlier and its
- * physical address saved in __per_cpu_mca[cpu]. Copy that
- * address to ia64_mca_data so we can access it as a per-CPU
- * variable.
- */
- __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
-#ifdef XEN
- IA64_MCA_DEBUG("%s: CPU#%d, ia64_mca_data=%lx\n", __FUNCTION__,
- smp_processor_id(), __get_cpu_var(ia64_mca_data));
-
- /* sal_to_os_handoff for smp support */
- __get_cpu_var(ia64_sal_to_os_handoff_state_addr) =
- __pa(&ia64_sal_to_os_handoff_state[smp_processor_id()]);
- IA64_MCA_DEBUG("%s: CPU#%d, ia64_sal_to_os=%lx\n", __FUNCTION__,
- smp_processor_id(),
- __get_cpu_var(ia64_sal_to_os_handoff_state_addr));
-#endif
-
- /*
- * Stash away a copy of the PTE needed to map the per-CPU page.
- * We may need it during MCA recovery.
- */
- __get_cpu_var(ia64_mca_per_cpu_pte) =
- pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
-
- /*
- * Also, stash away a copy of the PAL address and the PTE
- * needed to map it.
- */
- pal_vaddr = efi_get_pal_addr();
- if (!pal_vaddr)
- return;
- __get_cpu_var(ia64_mca_pal_base) =
- GRANULEROUNDDOWN((unsigned long) pal_vaddr);
- __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
- PAGE_KERNEL));
-}
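
On the Xen branch above, alloc_domheap_pages() takes a page order (log2 of the page count), so the size of struct ia64_mca_cpu must be rounded up to the next power-of-two number of pages. A host-runnable sketch of what get_order_from_bytes() is assumed to compute; PAGE_SHIFT and the byte count are illustrative:

    #include <stdio.h>

    #define PAGE_SHIFT 14  /* 16KB pages, a common ia64 configuration */

    static unsigned int order_from_bytes(unsigned long size)
    {
        unsigned int order = 0;
        /* round up to whole pages */
        unsigned long pages = (size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        /* e.g. a 3-page state area needs an order-2 (4-page) allocation */
        printf("order for 48KB: %u\n", order_from_bytes(48 * 1024));
        return 0;
    }
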
-
-/*
- * ia64_mca_init
- *
- * Do all the system level mca specific initialization.
- *
- * 1. Register spinloop and wakeup request interrupt vectors
- *
- * 2. Register OS_MCA handler entry point
- *
- * 3. Register OS_INIT handler entry point
- *
- * 4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
- *
- * Note that this initialization is done very early before some kernel
- * services are available.
- *
- * Inputs : None
- *
- * Outputs : None
- */
-void __init
-ia64_mca_init(void)
-{
- ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
- ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
- ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
- int i;
- s64 rc;
- struct ia64_sal_retval isrv;
- u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
-
-#ifdef XEN
- slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
-#endif
-
- IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
-
- /* Clear the Rendez checkin flag for all cpus */
- for(i = 0 ; i < NR_CPUS; i++)
- ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
-
- /*
- * Register the rendezvous spinloop and wakeup mechanism with SAL
- */
-
- /* Register the rendezvous interrupt vector with SAL */
- while (1) {
- isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
- SAL_MC_PARAM_MECHANISM_INT,
- IA64_MCA_RENDEZ_VECTOR,
- timeout,
- SAL_MC_PARAM_RZ_ALWAYS);
- rc = isrv.status;
- if (rc == 0)
- break;
- if (rc == -2) {
- printk(KERN_INFO "Increasing MCA rendezvous timeout from "
- "%ld to %ld milliseconds\n", timeout, isrv.v0);
- timeout = isrv.v0;
- continue;
- }
- printk(KERN_ERR "Failed to register rendezvous interrupt "
- "with SAL (status %ld)\n", rc);
- return;
- }
-
- /* Register the wakeup interrupt vector with SAL */
- isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
- SAL_MC_PARAM_MECHANISM_INT,
- IA64_MCA_WAKEUP_VECTOR,
- 0, 0);
- rc = isrv.status;
- if (rc) {
- printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
- "(status %ld)\n", rc);
- return;
- }
-
- IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
-
- ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
- /*
- * XXX - disable SAL checksum by setting size to 0; should be
- * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
- */
- ia64_mc_info.imi_mca_handler_size = 0;
-
- /* Register the os mca handler with SAL */
- if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
- ia64_mc_info.imi_mca_handler,
- ia64_tpa(mca_hldlr_ptr->gp),
- ia64_mc_info.imi_mca_handler_size,
- 0, 0, 0)))
- {
- printk(KERN_ERR "Failed to register OS MCA handler with SAL "
- "(status %ld)\n", rc);
- return;
- }
-
- IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
- ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
-
- /*
- * XXX - disable SAL checksum by setting size to 0, should be
- * size of the actual init handler in mca_asm.S.
- */
- ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp);
- ia64_mc_info.imi_monarch_init_handler_size = 0;
- ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp);
- ia64_mc_info.imi_slave_init_handler_size = 0;
-
- IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
- ia64_mc_info.imi_monarch_init_handler);
-
- /* Register the os init handler with SAL */
- if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
- ia64_mc_info.imi_monarch_init_handler,
- ia64_tpa(ia64_getreg(_IA64_REG_GP)),
- ia64_mc_info.imi_monarch_init_handler_size,
- ia64_mc_info.imi_slave_init_handler,
- ia64_tpa(ia64_getreg(_IA64_REG_GP)),
- ia64_mc_info.imi_slave_init_handler_size)))
- {
- printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
- "(status %ld)\n", rc);
- return;
- }
-
- IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
-
- /*
- * Configure the CMCI/P vector and handler. Interrupts for CMC are
- * per-processor, so AP CMC interrupts are set up in smp_callin() (smpboot.c).
- */
- register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
- register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
- ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
-
- /* Setup the MCA rendezvous interrupt vector */
- register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
-
- /* Setup the MCA wakeup interrupt vector */
- register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
-
-#ifdef CONFIG_ACPI
- /* Setup the CPEI/P handler */
- register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
-#endif
-
- /* Initialize the areas set aside by the OS to buffer the
- * platform/processor error states for MCA/INIT/CMC
- * handling.
- */
- ia64_log_init(SAL_INFO_TYPE_MCA);
- ia64_log_init(SAL_INFO_TYPE_INIT);
- ia64_log_init(SAL_INFO_TYPE_CMC);
- ia64_log_init(SAL_INFO_TYPE_CPE);
-
-#ifdef XEN
- INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_MCA]);
- INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_INIT]);
- INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_CMC]);
- INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_CPE]);
-
- /* NULL sal_queue used elsewhere to determine MCA init state */
- sal_queue = sal_log_queues;
-
- open_softirq(CMC_DISABLE_SOFTIRQ,
- (softirq_handler)ia64_mca_cmc_vector_disable);
- open_softirq(CMC_ENABLE_SOFTIRQ,
- (softirq_handler)ia64_mca_cmc_vector_enable);
-
- for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
- ia64_log_queue(i, 0);
-#endif
-
- mca_init = 1;
- printk(KERN_INFO "MCA related initialization done\n");
-}
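
The rendezvous registration above is a negotiation: SAL may reject the requested timeout with status -2 and suggest a larger value in v0, in which case the OS retries with the suggested timeout. A host-runnable sketch of that retry loop; the retval struct and fake_sal_set_params() (a made-up SAL that insists on a minimum timeout) are simplified stand-ins for struct ia64_sal_retval and the real SAL call:

    #include <stdio.h>

    struct sal_retval { long status; unsigned long v0; };

    static struct sal_retval fake_sal_set_params(unsigned long timeout)
    {
        if (timeout < 20000)  /* hypothetical platform minimum */
            return (struct sal_retval){ .status = -2, .v0 = 20000 };
        return (struct sal_retval){ .status = 0 };
    }

    int main(void)
    {
        unsigned long timeout = 1000;  /* illustrative initial value */
        for (;;) {
            struct sal_retval isrv = fake_sal_set_params(timeout);
            if (isrv.status == 0)
                break;
            if (isrv.status == -2) {   /* SAL wants a longer timeout */
                printf("increasing timeout from %lu to %lu ms\n",
                       timeout, isrv.v0);
                timeout = isrv.v0;
                continue;
            }
            printf("registration failed (status %ld)\n", isrv.status);
            return 1;
        }
        printf("registered with timeout %lu ms\n", timeout);
        return 0;
    }
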
-
-/*
- * ia64_mca_late_init
- *
- * Opportunity to set up things that require initialization later
- * than ia64_mca_init. Sets up a timer to poll for CPEs if the
- * platform doesn't support an interrupt-driven mechanism.
- *
- * Inputs : None
- * Outputs : Status
- */
-static int __init
-ia64_mca_late_init(void)
-{
- if (!mca_init)
- return 0;
-
- /* Setup the CMCI/P vector and handler */
-#ifndef XEN
- init_timer(&cmc_poll_timer);
- cmc_poll_timer.function = ia64_mca_cmc_poll;
-#else
- init_timer(&cmc_poll_timer, ia64_mca_cmc_poll,
- NULL, smp_processor_id());
-#endif
-
- /* Unmask/enable the vector */
- cmc_polling_enabled = 0;
-#ifndef XEN /* XXX FIXME */
- schedule_work(&cmc_enable_work);
-#else
- cpumask_raise_softirq(&cpu_online_map, CMC_ENABLE_SOFTIRQ);
-#endif
-
- IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
-
-#ifdef CONFIG_ACPI
- /* Setup the CPEI/P vector and handler */
- cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
-#ifndef XEN
- init_timer(&cpe_poll_timer);
- cpe_poll_timer.function = ia64_mca_cpe_poll;
-#else
- init_timer(&cpe_poll_timer, ia64_mca_cpe_poll,
- NULL,smp_processor_id());
-#endif
-
- {
- irq_desc_t *desc;
-#ifndef XEN
- unsigned int irq;
-#endif
-
- if (cpe_vector >= 0) {
- /* If platform supports CPEI, enable the irq. */
- cpe_poll_enabled = 0;
-#ifndef XEN
- for (irq = 0; irq < NR_IRQS; ++irq)
- if (irq_to_vector(irq) == cpe_vector) {
- desc = irq_descp(irq);
- desc->status |= IRQ_PER_CPU;
- setup_vector(irq, &mca_cpe_irqaction);
- }
-#else
- desc = irq_descp(cpe_vector);
- desc->status |= IRQ_PER_CPU;
- setup_vector(cpe_vector, &mca_cpe_irqaction);
-#endif
- ia64_mca_register_cpev(cpe_vector);
- IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
- } else {
- /* If platform doesn't support CPEI, get the timer going. */
- if (cpe_poll_enabled) {
- ia64_mca_cpe_poll(0UL);
- IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
- }
- }
- }
-#endif
-
- return 0;
-}
-
-#ifndef XEN
-device_initcall(ia64_mca_late_init);
-#else
-__initcall(ia64_mca_late_init);
-#endif
diff --git a/xen/arch/ia64/linux-xen/mca_asm.S b/xen/arch/ia64/linux-xen/mca_asm.S
deleted file mode 100644
index a045fd752f..0000000000
--- a/xen/arch/ia64/linux-xen/mca_asm.S
+++ /dev/null
@@ -1,1250 +0,0 @@
-//
-// assembly portion of the IA64 MCA handling
-//
-// Mods by cfleck to integrate into kernel build
-// 00/03/15 davidm Added various stop bits to get a clean compile
-//
-// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
-// kstack, switch modes, jump to C INIT handler
-//
-// 02/01/04 J.Hall <jenna.s.hall@intel.com>
-// Before entering virtual mode code:
-// 1. Check for TLB CPU error
-// 2. Restore current thread pointer to kr6
-// 3. Move stack ptr 16 bytes to conform to C calling convention
-//
-// 04/11/12 Russ Anderson <rja@sgi.com>
-// Added per cpu MCA/INIT stack save areas.
-//
-#include <linux/config.h>
-#include <linux/threads.h>
-
-#include <asm/asmmacro.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/mca_asm.h>
-#include <asm/mca.h>
-#ifdef XEN
-#include <asm/vhpt.h>
-#include <public/arch-ia64.h>
-#endif
-
-/*
- * When we get a machine check, the kernel stack pointer is no longer
- * valid, so we need to set a new stack pointer.
- */
-#define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */
-
-/*
- * Needed for return context to SAL
- */
-#define IA64_MCA_SAME_CONTEXT 0
-#define IA64_MCA_COLD_BOOT -2
-
-#include "minstate.h"
-
-/*
- * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
- * 1. GR1 = OS GP
- * 2. GR8 = PAL_PROC physical address
- * 3. GR9 = SAL_PROC physical address
- * 4. GR10 = SAL GP (physical)
- * 5. GR11 = Rendez state
- * 6. GR12 = Return address to location within SAL_CHECK
- */
-#ifdef XEN
-#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
- GET_THIS_PADDR(_tmp, ia64_sal_to_os_handoff_state_addr);; \
- ld8 _tmp=[_tmp];; \
- st8 [_tmp]=r1,0x08;; \
- st8 [_tmp]=r8,0x08;; \
- st8 [_tmp]=r9,0x08;; \
- st8 [_tmp]=r10,0x08;; \
- st8 [_tmp]=r11,0x08;; \
- st8 [_tmp]=r12,0x08;; \
- st8 [_tmp]=r17,0x08;; \
- st8 [_tmp]=r18,0x08
-#else
-#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
- LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
- st8 [_tmp]=r1,0x08;; \
- st8 [_tmp]=r8,0x08;; \
- st8 [_tmp]=r9,0x08;; \
- st8 [_tmp]=r10,0x08;; \
- st8 [_tmp]=r11,0x08;; \
- st8 [_tmp]=r12,0x08;; \
- st8 [_tmp]=r17,0x08;; \
- st8 [_tmp]=r18,0x08
-#endif /* XEN */
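
The store order in the macro above has to match the layout of the structure it fills, field per st8. A sketch of that layout: the four fields imsto_sal_gp, imsto_sal_check_ra, proc_state_param and pal_min_state are names that actually appear in mca.c above; the remaining names are guesses, so treat this as a sketch of the layout rather than the real ia64_mca_sal_to_os_state_t:

    typedef struct {
        unsigned long imsto_os_gp;        /* GR1:  OS GP */
        unsigned long pal_proc;           /* GR8:  PAL_PROC paddr */
        unsigned long imsto_sal_proc;     /* GR9:  SAL_PROC paddr */
        unsigned long imsto_sal_gp;       /* GR10: SAL GP (physical) */
        unsigned long imsto_rendez_state; /* GR11: rendezvous state */
        unsigned long imsto_sal_check_ra; /* GR12: return addr in SAL_CHECK */
        unsigned long proc_state_param;   /* r17 */
        unsigned long pal_min_state;      /* r18 */
    } sketch_sal_to_os_state_t;
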
-
-/*
- * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
- * (p6) is executed if we never entered virtual mode (TLB error)
- * (p7) is executed if we entered virtual mode as expected (normal case)
- * 1. GR8 = OS_MCA return status
- * 2. GR9 = SAL GP (physical)
- * 3. GR10 = 0/1 returning same/new context
- * 4. GR22 = New min state save area pointer
- * returns ptr to SAL rtn save loc in _tmp
- */
-#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
- movl _tmp=ia64_os_to_sal_handoff_state;; \
- DATA_VA_TO_PA(_tmp);; \
- ld8 r8=[_tmp],0x08;; \
- ld8 r9=[_tmp],0x08;; \
- ld8 r10=[_tmp],0x08;; \
- ld8 r22=[_tmp],0x08;;
- // now _tmp is pointing to SAL rtn save location
-
-/*
- * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
- * imots_os_status=IA64_MCA_COLD_BOOT
- * imots_sal_gp=SAL GP
- * imots_context=IA64_MCA_SAME_CONTEXT
- * imots_new_min_state=Min state save area pointer
- * imots_sal_check_ra=Return address to location within SAL_CHECK
- *
- */
-#ifdef XEN
-#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
- movl tmp=IA64_MCA_COLD_BOOT; \
- GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);; \
- ld8 sal_to_os_handoff=[sal_to_os_handoff];; \
- movl os_to_sal_handoff=ia64_os_to_sal_handoff_state;; \
- dep os_to_sal_handoff = 0, os_to_sal_handoff, 60, 4;; \
- /*DATA_VA_TO_PA(os_to_sal_handoff);;*/ \
- st8 [os_to_sal_handoff]=tmp,8;; \
- ld8 tmp=[sal_to_os_handoff],48;; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- movl tmp=IA64_MCA_SAME_CONTEXT;; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- ld8 tmp=[sal_to_os_handoff],-8;; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- ld8 tmp=[sal_to_os_handoff];; \
- st8 [os_to_sal_handoff]=tmp;;
-#else /* XEN */
-#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
- movl tmp=IA64_MCA_COLD_BOOT; \
- movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \
- movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- ld8 tmp=[sal_to_os_handoff],48;; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- movl tmp=IA64_MCA_SAME_CONTEXT;; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- ld8 tmp=[sal_to_os_handoff],-8;; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- ld8 tmp=[sal_to_os_handoff];; \
- st8 [os_to_sal_handoff]=tmp;;
-#endif /* XEN */
-
-#define GET_IA64_MCA_DATA(reg) \
- GET_THIS_PADDR(reg, ia64_mca_data) \
- ;; \
- ld8 reg=[reg]
-
- .global ia64_os_mca_dispatch
- .global ia64_os_mca_dispatch_end
-#ifndef XEN
- .global ia64_sal_to_os_handoff_state
- .global ia64_os_to_sal_handoff_state
-#endif
- .global ia64_do_tlb_purge
-
- .text
- .align 16
-
-/*
- * Just the TLB purge part is moved to a separate function
- * so we can re-use the code for the cpu hotplug path as well.
- * The caller should set up b1 so we can branch back once the
- * tlb flush is complete.
- */
-
-ia64_do_tlb_purge:
-#define O(member) IA64_CPUINFO_##member##_OFFSET
-
- GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
- ;;
- addl r17=O(PTCE_STRIDE),r2
- addl r2=O(PTCE_BASE),r2
- ;;
- ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
- ld4 r19=[r2],4 // r19=ptce_count[0]
- ld4 r21=[r17],4 // r21=ptce_stride[0]
- ;;
- ld4 r20=[r2] // r20=ptce_count[1]
- ld4 r22=[r17] // r22=ptce_stride[1]
- mov r24=0
- ;;
- adds r20=-1,r20
- ;;
-#undef O
-
-2:
- cmp.ltu p6,p7=r24,r19
-(p7) br.cond.dpnt.few 4f
- mov ar.lc=r20
-3:
- ptc.e r18
- ;;
- add r18=r22,r18
- br.cloop.sptk.few 3b
- ;;
- add r18=r21,r18
- add r24=1,r24
- ;;
- br.sptk.few 2b
-4:
- srlz.i // srlz.i implies srlz.d
- ;;
-
- // Now purge addresses formerly mapped by TR registers
- // 1. Purge ITR&DTR for kernel.
- movl r16=KERNEL_START
- mov r18=KERNEL_TR_PAGE_SHIFT<<2
- ;;
- ptr.i r16, r18
- ptr.d r16, r18
- ;;
- srlz.i
- ;;
- srlz.d
- ;;
- // 2. Purge DTR for PERCPU data.
- movl r16=PERCPU_ADDR
- mov r18=PERCPU_PAGE_SHIFT<<2
- ;;
- ptr.d r16,r18
- ;;
- srlz.d
- ;;
- // 3. Purge ITR for PAL code.
- GET_THIS_PADDR(r2, ia64_mca_pal_base)
- ;;
- ld8 r16=[r2]
- mov r18=IA64_GRANULE_SHIFT<<2
- ;;
- ptr.i r16,r18
- ;;
- srlz.i
- ;;
- // 4. Purge DTR for stack.
-#ifdef XEN
- // Kernel registers are saved in a per_cpu cpu_kr_ia64_t
- // to allow the kernel registers themselves to be used by domains.
- GET_THIS_PADDR(r2, cpu_kr);;
- add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
- ;;
- ld8 r16=[r2]
-#else
- mov r16=IA64_KR(CURRENT_STACK)
-#endif
- ;;
- shl r16=r16,IA64_GRANULE_SHIFT
- movl r19=PAGE_OFFSET
- ;;
- add r16=r19,r16
- mov r18=IA64_GRANULE_SHIFT<<2
- ;;
- ptr.d r16,r18
- ;;
- srlz.i
- ;;
-#ifdef XEN
- // 5. shared_info
- GET_THIS_PADDR(r2, inserted_shared_info);;
- ld8 r16=[r2]
- mov r18=XSI_SHIFT<<2
- ;;
- ptr.d r16,r18
- ;;
- srlz.d
- ;;
-
- // 6. mapped_regs
- GET_THIS_PADDR(r2, inserted_mapped_regs);;
- ld8 r16=[r2]
- mov r18=XMAPPEDREGS_SHIFT<<2
- ;;
- ptr.d r16,r18
- ;;
- srlz.d
- ;;
-
- // 7. VPD
- // The VPD will not be mapped in the case where
- // a VMX domain hasn't been started since boot
- GET_THIS_PADDR(r2, inserted_vpd);;
- ld8 r16=[r2]
- mov r18=IA64_GRANULE_SHIFT<<2
- ;;
- cmp.eq p7,p0=r16,r0
- ;;
-(p7) br.cond.sptk .vpd_not_mapped
- ;;
- ptr.i r16,r18
- ;;
- ptr.d r16,r18
- ;;
- srlz.i
- ;;
- srlz.d
- ;;
-.vpd_not_mapped:
-
- // 8. VHPT
- // GET_VA_VCPU_VHPT_MADDR() may not give the
- // value of the VHPT currently pinned into the TLB
- GET_THIS_PADDR(r2, inserted_vhpt);;
- ld8 r2=[r2]
- ;;
- cmp.eq p7,p0=r2,r0
- ;;
-(p7) br.cond.sptk .vhpt_not_mapped
- dep r16=0,r2,0,IA64_GRANULE_SHIFT
- mov r18=IA64_GRANULE_SHIFT<<2
- ;;
- ptr.d r16,r18
- ;;
- srlz.d
- ;;
-.vhpt_not_mapped:
-#endif
- // Now branch away to caller.
- br.sptk.many b1
- ;;
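
The ptc.e loop at the top of this routine has the same shape as Linux's local_flush_tlb_all(): PAL reports a base address plus two counts and strides, and one ptc.e per (i, j) point of that grid purges the whole local TLB. A C rendering of the r18/r21/r22/r24 loop above; ia64_ptce() is stubbed so the sketch compiles (the real instruction is privileged):

    static void ia64_ptce(unsigned long addr) { (void)addr; } /* stub */

    static void purge_local_tlb(unsigned long base,
                                unsigned long count0, unsigned long count1,
                                unsigned long stride0, unsigned long stride1)
    {
        unsigned long i, j, addr = base;

        for (i = 0; i < count0; i++) {       /* outer loop: r24 vs r19 */
            for (j = 0; j < count1; j++) {   /* inner loop: ar.lc */
                ia64_ptce(addr);             /* purge one TC entry set */
                addr += stride1;             /* r22 in the asm */
            }
            addr += stride0;                 /* r21 in the asm */
        }
        /* then srlz.i, which implies srlz.d, exactly as in the asm */
    }
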
-
-ia64_os_mca_dispatch:
-
- // Serialize all MCA processing
- mov r3=1;;
- LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
-ia64_os_mca_spin:
- xchg8 r4=[r2],r3;;
- cmp.ne p6,p0=r4,r0
-(p6) br ia64_os_mca_spin
-
- // Save the SAL to OS MCA handoff state as defined
- // by SAL SPEC 3.0
- // NOTE : The order in which the state gets saved
- // is dependent on the way the C-structure
- // for ia64_mca_sal_to_os_state_t has been
- // defined in include/asm/mca.h
- SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
- ;;
-
- // LOG PROCESSOR STATE INFO FROM HERE ON..
-begin_os_mca_dump:
- br ia64_os_mca_proc_state_dump;;
-
-ia64_os_mca_done_dump:
-
-#ifdef XEN
- // Set current to ar.k6
- GET_THIS_PADDR(r2,cpu_kr);;
- add r2=IA64_KR_CURRENT_OFFSET,r2;;
- ld8 r2=[r2];;
- mov ar.k6=r2;;
-
- GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;
- ld8 r2=[r2];;
- adds r16=56,r2
-#else
- LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
-#endif
- ;;
- ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
- ;;
- tbit.nz p6,p7=r18,60
-(p7) br.spnt done_tlb_purge_and_reload
-
- // The following code purges TC and TR entries, then reloads the TR entries.
- // Purge percpu data TC entries.
-begin_tlb_purge_and_reload:
- movl r18=ia64_reload_tr;;
- LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
- mov b1=r18;;
- br.sptk.many ia64_do_tlb_purge;;
-
-ia64_reload_tr:
- // Finally reload the TR registers.
- // 1. Reload DTR/ITR registers for kernel.
- mov r18=KERNEL_TR_PAGE_SHIFT<<2
- movl r17=KERNEL_START
- ;;
- mov cr.itir=r18
- mov cr.ifa=r17
- mov r16=IA64_TR_KERNEL
- mov r19=ip
- movl r18=PAGE_KERNEL
- ;;
- dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
- ;;
- or r18=r17,r18
- ;;
- itr.i itr[r16]=r18
- ;;
- itr.d dtr[r16]=r18
- ;;
- srlz.i
- srlz.d
- ;;
- // 2. Reload DTR register for PERCPU data.
- GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
- ;;
- movl r16=PERCPU_ADDR // vaddr
- movl r18=PERCPU_PAGE_SHIFT<<2
- ;;
- mov cr.itir=r18
- mov cr.ifa=r16
- ;;
- ld8 r18=[r2] // load per-CPU PTE
- mov r16=IA64_TR_PERCPU_DATA;
- ;;
- itr.d dtr[r16]=r18
- ;;
- srlz.d
- ;;
-#ifndef XEN
- // 3. Reload ITR for PAL code.
- GET_THIS_PADDR(r2, ia64_mca_pal_pte)
- ;;
- ld8 r18=[r2] // load PAL PTE
- ;;
- GET_THIS_PADDR(r2, ia64_mca_pal_base)
- ;;
- ld8 r16=[r2] // load PAL vaddr
- mov r19=IA64_GRANULE_SHIFT<<2
- ;;
- mov cr.itir=r19
- mov cr.ifa=r16
- mov r20=IA64_TR_PALCODE
- ;;
- itr.i itr[r20]=r18
- ;;
- srlz.i
- ;;
-#endif
-
- // 4. Reload DTR for stack.
-#ifdef XEN
- // Kernel registers are saved in a per_cpu cpu_kr_ia64_t
- // to allow the kernel registers themselves to be used by domains.
- GET_THIS_PADDR(r2, cpu_kr);;
- add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
- ;;
- ld8 r16=[r2]
-#else
- mov r16=IA64_KR(CURRENT_STACK)
-#endif
- ;;
- shl r16=r16,IA64_GRANULE_SHIFT
- movl r19=PAGE_OFFSET
- ;;
- add r18=r19,r16
- movl r20=PAGE_KERNEL
- ;;
- add r16=r20,r16
- mov r19=IA64_GRANULE_SHIFT<<2
- ;;
- mov cr.itir=r19
- mov cr.ifa=r18
- mov r20=IA64_TR_CURRENT_STACK
- ;;
- itr.d dtr[r20]=r16
- ;;
- srlz.d
- ;;
-#ifdef XEN
- // if !VMX_DOMAIN(current)
- // pin down shared_info and mapped_regs
- // else
- // pin down VPD
- GET_THIS_PADDR(r2,cpu_kr);;
- add r2=IA64_KR_CURRENT_OFFSET,r2
- ;;
- ld8 r2=[r2]
- ;;
- dep r2=0,r2,60,4
- ;;
- add r2=IA64_VCPU_FLAGS_OFFSET,r2
- ;;
- ld8 r2=[r2]
- ;;
- cmp.eq p6,p7 = r2,r0
-(p7) br.cond.sptk .vmx_domain
-
- // 5. shared_info
- GET_THIS_PADDR(r2, inserted_shared_info);;
- ld8 r16=[r2]
- mov r18=XSI_SHIFT<<2
- movl r20=__pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RW)
- ;;
- GET_THIS_PADDR(r2, domain_shared_info);;
- ld8 r17=[r2]
- ;;
- dep r17=0,r17,60,4
- ;;
- or r17=r17,r20 // construct PA | page properties
- mov cr.itir=r18
- mov cr.ifa=r16
- ;;
- mov r16=IA64_TR_SHARED_INFO
- ;;
- itr.d dtr[r16]=r17 // wire in new mapping...
- ;;
- srlz.d
- ;;
-
- // 6. mapped_regs
- GET_THIS_PADDR(r2, inserted_mapped_regs);;
- ld8 r16=[r2]
- mov r18=XMAPPEDREGS_SHIFT<<2
- ;;
- GET_THIS_PADDR(r2,cpu_kr);;
- add r2=IA64_KR_CURRENT_OFFSET,r2
- ;;
- ld8 r2=[r2]
- ;;
- dep r2=0,r2,60,4
- ;;
- add r2=IA64_VPD_BASE_OFFSET,r2
- ;;
- ld8 r17=[r2]
- ;;
- dep r17=0,r17,60,4
- ;;
- or r17=r17,r20 // construct PA | page properties
- mov cr.itir=r18
- mov cr.ifa=r16
- ;;
- mov r16=IA64_TR_MAPPED_REGS
- ;;
- itr.d dtr[r16]=r17 // wire in new mapping...
- ;;
- srlz.d
- ;;
- br.sptk.many .reload_vpd_not_mapped;;
-.vmx_domain:
-
- // 7. VPD
- GET_THIS_PADDR(r2, inserted_vpd);;
- ld8 r16=[r2]
- mov r18=IA64_GRANULE_SHIFT<<2
- ;;
- cmp.eq p7,p0=r16,r0
- ;;
-(p7) br.cond.sptk .reload_vpd_not_mapped
- dep r17=0,r16,60,4
- ;;
- dep r17=0,r17,0,IA64_GRANULE_SHIFT
- ;;
-
- // avoid overlapping with stack
- GET_THIS_PADDR(r2, cpu_kr);;
- add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
- ;;
- ld8 r19=[r2]
- ;;
- shl r19=r19,IA64_GRANULE_SHIFT
- ;;
- cmp.eq p0,p7=r17,r19
-
- movl r20=PAGE_KERNEL
- ;;
- or r17=r20,r17 // construct PA | page properties
- ;;
- mov cr.itir=r18
- mov cr.ifa=r16
- ;;
- mov r16=IA64_TR_VPD
- mov r18=IA64_TR_MAPPED_REGS
- ;;
- itr.i itr[r16]=r17
- ;;
-(p7) itr.d dtr[r18]=r17
- ;;
- srlz.i
- ;;
- srlz.d
- ;;
-.reload_vpd_not_mapped:
-
- // 8. VHPT
- GET_THIS_PADDR(r2, inserted_vhpt);;
- ld8 r2=[r2]
- ;;
- cmp.eq p7,p0=r2,r0
- ;;
-(p7) br.cond.sptk .overlap_vhpt // vhpt isn't mapped.
-
- dep r16=0,r2,0,IA64_GRANULE_SHIFT
- ;;
- dep r17=0,r16,60,4 // physical address of
- // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
-
- // avoid overlapping with stack TR
- GET_THIS_PADDR(r2,cpu_kr);;
- add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
- ;;
- ld8 r2=[r2]
- ;;
- shl r18=r2,IA64_GRANULE_SHIFT
- ;;
- cmp.eq p7,p0=r17,r18
-(p7) br.cond.sptk .overlap_vhpt
-
- // avoid overlapping with VPD
- GET_THIS_PADDR(r2, inserted_vpd);;
- ld8 r18=[r2]
- ;;
- dep r18=0,r18,60,4
- ;;
- dep r18=0,r18,0,IA64_GRANULE_SHIFT
- ;;
- cmp.eq p7,p0=r17,r18
-(p7) br.cond.sptk .overlap_vhpt
-
- movl r20=PAGE_KERNEL
- ;;
- mov r18=IA64_TR_VHPT
- mov r19=IA64_GRANULE_SHIFT<<2
- ;;
- or r17=r17,r20 // construct PA | page properties
- mov cr.itir=r19
- mov cr.ifa=r16
- ;;
- itr.d dtr[r18]=r17 // wire in new mapping...
- ;;
- srlz.d
- ;;
-.overlap_vhpt:
-#endif
- br.sptk.many done_tlb_purge_and_reload
-err:
- COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
- br.sptk.many ia64_os_mca_done_restore
-
-done_tlb_purge_and_reload:
-
- // Setup new stack frame for OS_MCA handling
- GET_IA64_MCA_DATA(r2)
- ;;
- add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
- add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
- ;;
- rse_switch_context(r6,r3,r2);; // RSC management in this new context
-
- GET_IA64_MCA_DATA(r2)
- ;;
- add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
- ;;
- mov r12=r2 // establish new stack-pointer
-
- // Enter virtual mode from physical mode
- VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
-ia64_os_mca_virtual_begin:
-
- // Call virtual mode handler
- movl r2=ia64_mca_ucmc_handler;;
- mov b6=r2;;
- br.call.sptk.many b0=b6;;
-.ret0:
- // Revert back to physical mode before going back to SAL
- PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
-ia64_os_mca_virtual_end:
-
- // restore the original stack frame here
- GET_IA64_MCA_DATA(r2)
- ;;
- add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
- ;;
- movl r4=IA64_PSR_MC
- ;;
- rse_return_context(r4,r3,r2) // switch from interrupt context for RSE
-
- // let us restore all the registers from our PSI structure
- mov r8=gp
- ;;
-begin_os_mca_restore:
- br ia64_os_mca_proc_state_restore;;
-
-ia64_os_mca_done_restore:
- OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
- // branch back to SALE_CHECK
- ld8 r3=[r2];;
- mov b0=r3;; // SAL_CHECK return address
-
- // release lock
- movl r3=ia64_mca_serialize;;
- DATA_VA_TO_PA(r3);;
- st8.rel [r3]=r0
-
- br b0
- ;;
-ia64_os_mca_dispatch_end:
-//EndMain//////////////////////////////////////////////////////////////////////
-
-
-//++
-// Name:
-// ia64_os_mca_proc_state_dump()
-//
-// Stub Description:
-//
-// This stub dumps the processor state during MCHK to a data area
-//
-//--
-
-ia64_os_mca_proc_state_dump:
-// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
-// to virtual addressing mode.
- GET_IA64_MCA_DATA(r2)
- ;;
- add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
- ;;
-// save ar.NaT
- mov r5=ar.unat // ar.unat
-
-// save banked GRs 16-31 along with NaT bits
- bsw.1;;
- st8.spill [r2]=r16,8;;
- st8.spill [r2]=r17,8;;
- st8.spill [r2]=r18,8;;
- st8.spill [r2]=r19,8;;
- st8.spill [r2]=r20,8;;
- st8.spill [r2]=r21,8;;
- st8.spill [r2]=r22,8;;
- st8.spill [r2]=r23,8;;
- st8.spill [r2]=r24,8;;
- st8.spill [r2]=r25,8;;
- st8.spill [r2]=r26,8;;
- st8.spill [r2]=r27,8;;
- st8.spill [r2]=r28,8;;
- st8.spill [r2]=r29,8;;
- st8.spill [r2]=r30,8;;
- st8.spill [r2]=r31,8;;
-
- mov r4=ar.unat;;
- st8 [r2]=r4,8 // save User NaT bits for r16-r31
- mov ar.unat=r5 // restore original unat
- bsw.0;;
-
-//save BRs
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2 // duplicate r2 in r6
-
- mov r3=b0
- mov r5=b1
- mov r7=b2;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=b3
- mov r5=b4
- mov r7=b5;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=b6
- mov r5=b7;;
- st8 [r2]=r3,2*8
- st8 [r4]=r5,2*8;;
-
-cSaveCRs:
-// save CRs
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2 // duplicate r2 in r6
-
- mov r3=cr.dcr
- mov r5=cr.itm
- mov r7=cr.iva;;
-
- st8 [r2]=r3,8*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;; // 48 byte increments
-
- mov r3=cr.pta;;
- st8 [r2]=r3,8*8;; // 64 byte increments
-
-// if PSR.ic=1, reading interruption registers causes an illegal operation fault
- mov r3=psr;;
- tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
-(p6) st8 [r2]=r0,9*8+160 // increment by 232 bytes (9*8+160)
-begin_skip_intr_regs:
-(p6) br SkipIntrRegs;;
-
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2 // duplicate r2 in r6
-
- mov r3=cr.ipsr
- mov r5=cr.isr
- mov r7=r0;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=cr.iip
- mov r5=cr.ifa
- mov r7=cr.itir;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=cr.iipa
- mov r5=cr.ifs
- mov r7=cr.iim;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=cr25;; // cr.iha
- st8 [r2]=r3,160;; // 160 byte increment
-
-SkipIntrRegs:
- st8 [r2]=r0,152;; // another 152 byte increment.
-
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2 // duplicate r2 in r6
-
- mov r3=cr.lid
-// mov r5=cr.ivr // cr.ivr, don't read it
- mov r7=cr.tpr;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=r0 // cr.eoi => cr67
- mov r5=r0 // cr.irr0 => cr68
- mov r7=r0;; // cr.irr1 => cr69
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=r0 // cr.irr2 => cr70
- mov r5=r0 // cr.irr3 => cr71
- mov r7=cr.itv;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=cr.pmv
- mov r5=cr.cmcv;;
- st8 [r2]=r3,7*8
- st8 [r4]=r5,7*8;;
-
- mov r3=r0 // cr.lrr0 => cr80
- mov r5=r0;; // cr.lrr1 => cr81
- st8 [r2]=r3,23*8
- st8 [r4]=r5,23*8;;
-
- adds r2=25*8,r2;;
-
-cSaveARs:
-// save ARs
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2 // duplicate r2 in r6
-
- mov r3=ar.k0
- mov r5=ar.k1
- mov r7=ar.k2;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=ar.k3
- mov r5=ar.k4
- mov r7=ar.k5;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=ar.k6
- mov r5=ar.k7
- mov r7=r0;; // ar.kr8
- st8 [r2]=r3,10*8
- st8 [r4]=r5,10*8
- st8 [r6]=r7,10*8;; // increment by 72 bytes
-
- mov r3=ar.rsc
- mov ar.rsc=r0 // put RSE in enforced lazy mode
- mov r5=ar.bsp
- ;;
- mov r7=ar.bspstore;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=ar.rnat;;
- st8 [r2]=r3,8*13 // increment by 13x8 bytes
-
- mov r3=ar.ccv;;
- st8 [r2]=r3,8*4
-
- mov r3=ar.unat;;
- st8 [r2]=r3,8*4
-
- mov r3=ar.fpsr;;
- st8 [r2]=r3,8*4
-
- mov r3=ar.itc;;
- st8 [r2]=r3,160 // 160
-
- mov r3=ar.pfs;;
- st8 [r2]=r3,8
-
- mov r3=ar.lc;;
- st8 [r2]=r3,8
-
- mov r3=ar.ec;;
- st8 [r2]=r3
- add r2=8*62,r2 //padding
-
-// save RRs
- mov ar.lc=0x08-1
- movl r4=0x00;;
-
-cStRR:
- dep.z r5=r4,61,3;;
- mov r3=rr[r5];;
- st8 [r2]=r3,8
- add r4=1,r4
- br.cloop.sptk.few cStRR
- ;;
-end_os_mca_dump:
- br ia64_os_mca_done_dump;;
-
-//EndStub//////////////////////////////////////////////////////////////////////
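
On the cStRR loop that closes the dump stub: rr[] is indexed by a virtual address, and dep.z r5=r4,61,3 deposits the region number into bits [63:61] of an otherwise-zero address, so the loop walks all eight region registers. A host-runnable sketch of the same arithmetic (LP64 assumed, as on ia64):

    #include <stdio.h>

    int main(void)
    {
        unsigned int region;

        for (region = 0; region < 8; region++) {
            /* dep.z r5=r4,61,3 in C */
            unsigned long rr_index = (unsigned long)region << 61;
            printf("rr[%u] probed via address %#018lx\n", region, rr_index);
        }
        return 0;
    }
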
-
-
-//++
-// Name:
-// ia64_os_mca_proc_state_restore()
-//
-// Stub Description:
-//
-// This is a stub to restore the saved processor state during MCHK
-//
-//--
-
-ia64_os_mca_proc_state_restore:
-
-// Restore bank1 GR16-31
- GET_IA64_MCA_DATA(r2)
- ;;
- add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
-
-restore_GRs: // restore bank-1 GRs 16-31
- bsw.1;;
- add r3=16*8,r2;; // to get to NaT of GR 16-31
- ld8 r3=[r3];;
- mov ar.unat=r3;; // first restore NaT
-
- ld8.fill r16=[r2],8;;
- ld8.fill r17=[r2],8;;
- ld8.fill r18=[r2],8;;
- ld8.fill r19=[r2],8;;
- ld8.fill r20=[r2],8;;
- ld8.fill r21=[r2],8;;
- ld8.fill r22=[r2],8;;
- ld8.fill r23=[r2],8;;
- ld8.fill r24=[r2],8;;
- ld8.fill r25=[r2],8;;
- ld8.fill r26=[r2],8;;
- ld8.fill r27=[r2],8;;
- ld8.fill r28=[r2],8;;
- ld8.fill r29=[r2],8;;
- ld8.fill r30=[r2],8;;
- ld8.fill r31=[r2],8;;
-
- ld8 r3=[r2],8;; // increment to skip NaT
- bsw.0;;
-
-restore_BRs:
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2;; // duplicate r2 in r6
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov b0=r3
- mov b1=r5
- mov b2=r7;;
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov b3=r3
- mov b4=r5
- mov b5=r7;;
-
- ld8 r3=[r2],2*8
- ld8 r5=[r4],2*8;;
- mov b6=r3
- mov b7=r5;;
-
-restore_CRs:
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2;; // duplicate r2 in r6
-
- ld8 r3=[r2],8*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;; // 48 byte increments
- mov cr.dcr=r3
- mov cr.itm=r5
- mov cr.iva=r7;;
-
- ld8 r3=[r2],8*8;; // 64 byte increments
-// mov cr.pta=r3
-
-
-// if PSR.ic=1, reading interruption registers causes an illegal operation fault
- mov r3=psr;;
- tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
-(p6) st8 [r2]=r0,9*8+160 // increment by 232 bytes (9*8+160)
-
-begin_rskip_intr_regs:
-(p6) br rSkipIntrRegs;;
-
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2;; // duplicate r2 in r6
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov cr.ipsr=r3
-// mov cr.isr=r5 // cr.isr is read only
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov cr.iip=r3
- mov cr.ifa=r5
- mov cr.itir=r7;;
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov cr.iipa=r3
- mov cr.ifs=r5
- mov cr.iim=r7
-
- ld8 r3=[r2],160;; // 160 byte increment
- mov cr.iha=r3
-
-rSkipIntrRegs:
- ld8 r3=[r2],152;; // another 152 byte inc.
-
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2;; // duplicate r2 in r6
-
- ld8 r3=[r2],8*3
- ld8 r5=[r4],8*3
- ld8 r7=[r6],8*3;;
- mov cr.lid=r3
-// mov cr.ivr=r5 // cr.ivr is read only
- mov cr.tpr=r7;;
-
- ld8 r3=[r2],8*3
- ld8 r5=[r4],8*3
- ld8 r7=[r6],8*3;;
-// mov cr.eoi=r3
-// mov cr.irr0=r5 // cr.irr0 is read only
-// mov cr.irr1=r7;; // cr.irr1 is read only
-
- ld8 r3=[r2],8*3
- ld8 r5=[r4],8*3
- ld8 r7=[r6],8*3;;
-// mov cr.irr2=r3 // cr.irr2 is read only
-// mov cr.irr3=r5 // cr.irr3 is read only
- mov cr.itv=r7;;
-
- ld8 r3=[r2],8*7
- ld8 r5=[r4],8*7;;
- mov cr.pmv=r3
- mov cr.cmcv=r5;;
-
- ld8 r3=[r2],8*23
- ld8 r5=[r4],8*23;;
- adds r2=8*23,r2
- adds r4=8*23,r4;;
-// mov cr.lrr0=r3
-// mov cr.lrr1=r5
-
- adds r2=8*2,r2;;
-
-restore_ARs:
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2;; // duplicate r2 in r6
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov ar.k0=r3
- mov ar.k1=r5
- mov ar.k2=r7;;
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov ar.k3=r3
- mov ar.k4=r5
- mov ar.k5=r7;;
-
- ld8 r3=[r2],10*8
- ld8 r5=[r4],10*8
- ld8 r7=[r6],10*8;;
- mov ar.k6=r3
- mov ar.k7=r5
- ;;
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
-// mov ar.rsc=r3
-// mov ar.bsp=r5 // ar.bsp is read only
- mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode
- ;;
- mov ar.bspstore=r7;;
-
- ld8 r9=[r2],8*13;;
- mov ar.rnat=r9
-
- mov ar.rsc=r3
- ld8 r3=[r2],8*4;;
- mov ar.ccv=r3
-
- ld8 r3=[r2],8*4;;
- mov ar.unat=r3
-
- ld8 r3=[r2],8*4;;
- mov ar.fpsr=r3
-
- ld8 r3=[r2],160;; // 160
-// mov ar.itc=r3
-
- ld8 r3=[r2],8;;
- mov ar.pfs=r3
-
- ld8 r3=[r2],8;;
- mov ar.lc=r3
-
- ld8 r3=[r2];;
- mov ar.ec=r3
- add r2=8*62,r2;; // padding
-
-restore_RRs:
- mov r5=ar.lc
- mov ar.lc=0x08-1
- movl r4=0x00;;
-cStRRr:
- dep.z r7=r4,61,3
- ld8 r3=[r2],8;;
- mov rr[r7]=r3 // what are its access privileges?
- add r4=1,r4
- br.cloop.sptk.few cStRRr
- ;;
- mov ar.lc=r5
- ;;
-end_os_mca_restore:
- br ia64_os_mca_done_restore;;
-
-//EndStub//////////////////////////////////////////////////////////////////////
-
-
-// ok, the issue here is that we need to save state information so
-// it can be usable by the kernel debugger and show_regs routines.
-// In order to do this, our best bet is to save the current state (plus
-// the state information obtained from the MIN_STATE_AREA) into a pt_regs
-// format. This way we can pass it on in a usable format.
-//
-
-//
-// SAL to OS entry point for INIT on the monarch processor
-// This has been defined for registration purposes with SAL
-// as a part of ia64_mca_init.
-//
-// When we get here, the following registers have been
-// set by the SAL for our use
-//
-// 1. GR1 = OS INIT GP
-// 2. GR8 = PAL_PROC physical address
-// 3. GR9 = SAL_PROC physical address
-// 4. GR10 = SAL GP (physical)
-// 5. GR11 = Init Reason
-// 0 = Received INIT for event other than crash dump switch
-// 1 = Received wakeup at the end of an OS_MCA corrected machine check
-// 2 = Received INIT due to CrashDump switch assertion
-//
-// 6. GR12 = Return address to location within SAL_INIT procedure
-
-
-GLOBAL_ENTRY(ia64_monarch_init_handler)
- .prologue
-#ifdef XEN /* Need in ia64_monarch_init_handler? */
- // Set current to ar.k6
- GET_THIS_PADDR(r2,cpu_kr);;
- add r2=IA64_KR_CURRENT_OFFSET,r2;;
- ld8 r2=[r2];;
- mov ar.k6=r2;;
-#endif
- // stash the information the SAL passed to os
- SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
- ;;
- SAVE_MIN_WITH_COVER
- ;;
- mov r8=cr.ifa
- mov r9=cr.isr
- adds r3=8,r2 // set up second base pointer
- ;;
- SAVE_REST
-
-// ok, enough should be saved at this point to be dangerous, and supply
-// information for a dump
-// We need to switch to Virtual mode before hitting the C functions.
-
- movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
- mov r3=psr // get the current psr, minimum enabled at this point
- ;;
- or r2=r2,r3
- ;;
- movl r3=IVirtual_Switch
- ;;
- mov cr.iip=r3 // short return to set the appropriate bits
- mov cr.ipsr=r2 // need to do an rfi to set appropriate bits
- ;;
- rfi
- ;;
-IVirtual_Switch:
- //
- // We should now be running virtual
- //
- // Let's call the C handler to get the rest of the state info
- //
- alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
- ;;
- adds out0=16,sp // out0 = pointer to pt_regs
- ;;
- DO_SAVE_SWITCH_STACK
- .body
- adds out1=16,sp // out1 = pointer to switch_stack
-
- br.call.sptk.many rp=ia64_init_handler
-.ret1:
-
-return_from_init:
- br.sptk return_from_init
-END(ia64_monarch_init_handler)
-
-//
-// SAL to OS entry point for INIT on the slave processor
-// This has been defined for registration purposes with SAL
-// as a part of ia64_mca_init.
-//
-
-GLOBAL_ENTRY(ia64_slave_init_handler)
-1: br.sptk 1b
-END(ia64_slave_init_handler)
diff --git a/xen/arch/ia64/linux-xen/minstate.h b/xen/arch/ia64/linux-xen/minstate.h
deleted file mode 100644
index 5c582e2a40..0000000000
--- a/xen/arch/ia64/linux-xen/minstate.h
+++ /dev/null
@@ -1,306 +0,0 @@
-#include <linux/config.h>
-
-#include <asm/cache.h>
-
-#include "entry.h"
-
-/*
- * For ivt.s we want to access the stack virtually so we don't have to disable translation
- * on interrupts.
- *
- * On entry:
- * r1: pointer to current task (ar.k6)
- */
-#define MINSTATE_START_SAVE_MIN_VIRT \
-(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
- ;; \
-(pUStk) mov.m r24=ar.rnat; \
-(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
-(pKStk) mov r1=sp; /* get sp */ \
- ;; \
-(pUStk) lfetch.fault.excl.nt1 [r22]; \
-(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
- ;; \
-(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
-(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
- ;; \
-(pUStk) mov r18=ar.bsp; \
-(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
-
-#define MINSTATE_END_SAVE_MIN_VIRT \
- bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
- ;;
-
-/*
- * For mca_asm.S we want to access the stack physically since the state is saved before we
- * go virtual and don't want to destroy the iip or ipsr.
- */
-#ifdef XEN
-# define MINSTATE_START_SAVE_MIN_PHYS \
-(pKStk) tbit.z pKStk,pUStk=r29,IA64_PSR_VM_BIT; \
- ;; \
-(pKStk) movl r3=THIS_CPU(ia64_mca_data);; \
-(pKStk) tpa r3 = r3;; \
-(pKStk) ld8 r3 = [r3];; \
-(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
-(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
-(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
- ;; \
-(pUStk) mov r24=ar.rnat; \
-(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
-(pUStk) dep r22=-1,r22,60,4; /* compute Xen virtual addr of RBS */ \
- ;; \
-(pUStk) mov ar.bspstore=r22; /* switch to Xen RBS */ \
- ;; \
-(pUStk) mov r18=ar.bsp; \
-(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
-
-# define MINSTATE_END_SAVE_MIN_PHYS \
- dep r12=-1,r12,60,4; /* make sp a Xen virtual address */ \
- ;;
-#else
-# define MINSTATE_START_SAVE_MIN_PHYS \
-(pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \
-(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
-(pKStk) ld8 r3 = [r3];; \
-(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
-(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
-(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
- ;; \
-(pUStk) mov r24=ar.rnat; \
-(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
-(pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
- ;; \
-(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
- ;; \
-(pUStk) mov r18=ar.bsp; \
-(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
-
-# define MINSTATE_END_SAVE_MIN_PHYS \
- dep r12=-1,r12,61,3; /* make sp a kernel virtual address */ \
- ;;
-#endif /* XEN */
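
The "dep reg=-1,reg,pos,len" idioms in these macros are what turn a
physical stack/RBS address into a virtual one; a hedged C sketch of the
two variants, assuming the usual ia64 identity mappings (region 7 for
Linux, region 0xf for Xen as used by virt_to_xenva):

    #include <stdint.h>

    /* dep r12=-1,r12,61,3 sets bits 63..61: the Linux kernel's
     * region-7 identity mapping of physical memory. */
    static inline uint64_t linux_kernel_va(uint64_t pa)
    {
        return pa | (0x7UL << 61);   /* 0xe000000000000000 */
    }

    /* dep r12=-1,r12,60,4 sets bits 63..60: Xen's region-0xf mapping. */
    static inline uint64_t xen_va(uint64_t pa)
    {
        return pa | (0xfUL << 60);   /* 0xf000000000000000 */
    }
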
-
-#ifdef MINSTATE_VIRT
-#ifdef XEN
-# define MINSTATE_GET_CURRENT(reg) \
- movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;; \
- ld8 reg=[reg]
-# define MINSTATE_GET_CURRENT_VIRT(reg) MINSTATE_GET_CURRENT(reg)
-#else
-# define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT)
-#endif
-# define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_VIRT
-# define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_VIRT
-#endif
-
-#ifdef MINSTATE_PHYS
-# ifdef XEN
-# define MINSTATE_GET_CURRENT(reg) \
- movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;; \
- tpa reg=reg;; \
- ld8 reg=[reg];; \
- tpa reg=reg;;
-# define MINSTATE_GET_CURRENT_VIRT(reg) \
- movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;; \
- tpa reg=reg;; \
- ld8 reg=[reg];;
-#else
-# define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT);; tpa reg=reg
-#endif /* XEN */
-# define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_PHYS
-# define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_PHYS
-#endif
-
-/*
- * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
- * the minimum state necessary that allows us to turn psr.ic back
- * on.
- *
- * Assumed state upon entry:
- * psr.ic: off
- * r31: contains saved predicates (pr)
- *
- * Upon exit, the state is as follows:
- * psr.ic: off
- * r2 = points to &pt_regs.r16
- * r8 = contents of ar.ccv
- * r9 = contents of ar.csd
- * r10 = contents of ar.ssd
- * r11 = FPSR_DEFAULT
- * r12 = kernel sp (kernel virtual address)
- * r13 = points to current task_struct (kernel virtual address)
- * p15 = TRUE if psr.i is set in cr.ipsr
- * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
- * preserved
- *
- * Note that psr.ic is NOT turned on by this macro. This is so that
- * we can pass interruption state as arguments to a handler.
- */
-#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
- MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
- mov r27=ar.rsc; /* M */ \
- mov r20=r1; /* A */ \
- mov r25=ar.unat; /* M */ \
- mov r29=cr.ipsr; /* M */ \
- mov r26=ar.pfs; /* I */ \
- mov r28=cr.iip; /* M */ \
- mov r21=ar.fpsr; /* M */ \
- COVER; /* B;; (or nothing) */ \
- ;; \
- adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
- ;; \
- ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
- st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
- adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
- /* switch from user to kernel RBS: */ \
- ;; \
- invala; /* M */ \
- SAVE_IFS; \
- cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
- ;; \
- MINSTATE_START_SAVE_MIN \
- adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
- adds r16=PT(CR_IPSR),r1; \
- ;; \
- lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
- st8 [r16]=r29; /* save cr.ipsr */ \
- ;; \
- lfetch.fault.excl.nt1 [r17]; \
- tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
- mov r29=b0 \
- ;; \
- adds r16=PT(R8),r1; /* initialize first base pointer */ \
- adds r17=PT(R9),r1; /* initialize second base pointer */ \
-(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r8,16; \
-.mem.offset 8,0; st8.spill [r17]=r9,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r10,24; \
-.mem.offset 8,0; st8.spill [r17]=r11,24; \
- ;; \
- st8 [r16]=r28,16; /* save cr.iip */ \
- st8 [r17]=r30,16; /* save cr.ifs */ \
-(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
- mov r8=ar.ccv; \
- mov r9=ar.csd; \
- mov r10=ar.ssd; \
- movl r11=FPSR_DEFAULT; /* L-unit */ \
- ;; \
- st8 [r16]=r25,16; /* save ar.unat */ \
- st8 [r17]=r26,16; /* save ar.pfs */ \
- shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
- ;; \
- st8 [r16]=r27,16; /* save ar.rsc */ \
-(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
-(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
- ;; /* avoid RAW on r16 & r17 */ \
-(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
- st8 [r17]=r31,16; /* save predicates */ \
-(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
- ;; \
- st8 [r16]=r29,16; /* save b0 */ \
- st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
- cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
-.mem.offset 8,0; st8.spill [r17]=r12,16; \
- adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r13,16; \
-.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
- /* XEN mov r13=IA64_KR(CURRENT);*/ /* establish `current' */ \
- MINSTATE_GET_CURRENT_VIRT(r13); /* XEN establish `current' */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r15,16; \
-.mem.offset 8,0; st8.spill [r17]=r14,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r2,16; \
-.mem.offset 8,0; st8.spill [r17]=r3,16; \
- adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
- ;; \
- EXTRA; \
- movl r1=__gp; /* establish kernel global pointer */ \
- ;; \
- MINSTATE_END_SAVE_MIN
-
-/*
- * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
- *
- * Assumed state upon entry:
- * psr.ic: on
- * r2: points to &pt_regs.r16
- * r3: points to &pt_regs.r17
- * r8: contents of ar.ccv
- * r9: contents of ar.csd
- * r10: contents of ar.ssd
- * r11: FPSR_DEFAULT
- *
- * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
- */
-#define SAVE_REST \
-.mem.offset 0,0; st8.spill [r2]=r16,16; \
-.mem.offset 8,0; st8.spill [r3]=r17,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r18,16; \
-.mem.offset 8,0; st8.spill [r3]=r19,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r20,16; \
-.mem.offset 8,0; st8.spill [r3]=r21,16; \
- mov r18=b6; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r22,16; \
-.mem.offset 8,0; st8.spill [r3]=r23,16; \
- mov r19=b7; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r24,16; \
-.mem.offset 8,0; st8.spill [r3]=r25,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r26,16; \
-.mem.offset 8,0; st8.spill [r3]=r27,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r28,16; \
-.mem.offset 8,0; st8.spill [r3]=r29,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r30,16; \
-.mem.offset 8,0; st8.spill [r3]=r31,32; \
- ;; \
- mov ar.fpsr=r11; /* M-unit */ \
- st8 [r2]=r8,8; /* ar.ccv */ \
- adds r24=PT(B6)-PT(F7),r3; \
- ;; \
- stf.spill [r2]=f6,32; \
- stf.spill [r3]=f7,32; \
- ;; \
- stf.spill [r2]=f8,32; \
- stf.spill [r3]=f9,32; \
- ;; \
- stf.spill [r2]=f10,32; \
- stf.spill [r3]=f11,24; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r4,16; \
-.mem.offset 8,0; st8.spill [r3]=r5,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r6,16; \
-.mem.offset 8,0; st8.spill [r3]=r7; \
- adds r25=PT(B7)-PT(R7),r3; \
- ;; \
- st8 [r24]=r18,16; /* b6 */ \
- st8 [r25]=r19,16; /* b7 */ \
- ;; \
- st8 [r24]=r9; /* ar.csd */ \
- mov r26=ar.unat; \
- ;; \
- st8 [r25]=r10; /* ar.ssd */ \
- st8 [r2]=r26; /* eml_unat */ \
- ;;
-
-#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
-#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
-#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, )
diff --git a/xen/arch/ia64/linux-xen/mm_contig.c b/xen/arch/ia64/linux-xen/mm_contig.c
deleted file mode 100644
index f2326eb394..0000000000
--- a/xen/arch/ia64/linux-xen/mm_contig.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
- *
- * Routines used by ia64 machines with contiguous (or virtually contiguous)
- * memory.
- */
-#include <linux/config.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-
-#include <asm/meminit.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-#include <asm/mca.h>
-
-#include <linux/efi.h>
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long num_dma_physpages;
-#endif
-
-/**
- * show_mem - display a memory statistics summary
- *
- * Just walks the pages in the system and describes where they're allocated.
- */
-#ifndef XEN
-void
-show_mem (void)
-{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
-
- printk("Mem-info:\n");
- show_free_areas();
-
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- while (i-- > 0) {
- if (!mfn_valid(i))
- continue;
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (page_count(mem_map + i))
- shared += page_count(mem_map + i) - 1;
- }
- printk("%d pages of RAM\n", total);
- printk("%d reserved pages\n", reserved);
- printk("%d pages shared\n", shared);
- printk("%d pages swap cached\n", cached);
- printk("%ld pages in page table cache\n",
- pgtable_quicklist_total_size());
-}
-#endif
-
-/* physical address where the bootmem map is located */
-unsigned long bootmap_start;
-
-/**
- * find_max_pfn - adjust the maximum page number callback
- * @start: start of range
- * @end: end of range
- * @arg: address of pointer to global max_pfn variable
- *
- * Passed as a callback function to efi_memmap_walk() to determine the highest
- * available page frame number in the system.
- */
-int
-find_max_pfn (unsigned long start, unsigned long end, void *arg)
-{
- unsigned long *max_pfnp = arg, pfn;
-
- pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
- if (pfn > *max_pfnp)
- *max_pfnp = pfn;
- return 0;
-}
-
-/**
- * find_bootmap_location - callback to find a memory area for the bootmap
- * @start: start of region
- * @end: end of region
- * @arg: unused callback data
- *
- * Find a place to put the bootmap and return its starting address in
- * bootmap_start. This address must be page-aligned.
- */
-int
-find_bootmap_location (unsigned long start, unsigned long end, void *arg)
-{
- unsigned long needed = *(unsigned long *)arg;
- unsigned long range_start, range_end, free_start;
- int i;
-
-#if IGNORE_PFN0
- if (start == PAGE_OFFSET) {
- start += PAGE_SIZE;
- if (start >= end)
- return 0;
- }
-#endif
-
- free_start = PAGE_OFFSET;
-
- for (i = 0; i < num_rsvd_regions; i++) {
- range_start = max(start, free_start);
- range_end = min(end, rsvd_region[i].start & PAGE_MASK);
-
- free_start = PAGE_ALIGN(rsvd_region[i].end);
-
- if (range_end <= range_start)
- continue; /* skip over empty range */
-
- if (range_end - range_start >= needed) {
- bootmap_start = __pa(range_start);
- return -1; /* done */
- }
-
- /* nothing more available in this segment */
- if (range_end == end)
- return 0;
- }
- return 0;
-}
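
find_max_pfn() and find_bootmap_location() both follow the same walker
contract: return 0 to keep scanning, nonzero to stop early (as
find_bootmap_location() does with -1 once the bootmap is placed). A
minimal sketch of that protocol, with a hypothetical stand-in for
efi_memmap_walk():

    typedef int (*walk_cb_t)(unsigned long start, unsigned long end, void *arg);

    /* Hypothetical stand-in for efi_memmap_walk(): visit each usable
     * (start, end) range, stopping early on a nonzero return. */
    static void memmap_walk(walk_cb_t cb, void *arg,
                            const unsigned long (*ranges)[2], int n)
    {
        int i;

        for (i = 0; i < n; i++)
            if (cb(ranges[i][0], ranges[i][1], arg))
                break;  /* callback said "done" */
    }
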
-
-/**
- * find_memory - setup memory map
- *
- * Walk the EFI memory map and find usable memory for the system, taking
- * into account reserved areas.
- */
-#ifndef XEN
-void
-find_memory (void)
-{
- unsigned long bootmap_size;
-
- reserve_memory();
-
- /* first find highest page frame number */
- max_pfn = 0;
- efi_memmap_walk(find_max_pfn, &max_pfn);
-
- /* how many bytes to cover all the pages */
- bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
-
- /* look for a location to hold the bootmap */
- bootmap_start = ~0UL;
- efi_memmap_walk(find_bootmap_location, &bootmap_size);
- if (bootmap_start == ~0UL)
- panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
-
- bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
-
- /* Free all available memory, then mark bootmem-map as being in use. */
- efi_memmap_walk(filter_rsvd_memory, free_bootmem);
- reserve_bootmem(bootmap_start, bootmap_size);
-
- find_initrd();
-}
-#endif
-
-#ifdef CONFIG_SMP
-#ifdef XEN
-#include <asm/elf.h>
-
-void *percpu_area __initdata = NULL;
-
-void* __init
-per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa)
-{
- int order = get_order((NR_CPUS - 1) * PERCPU_PAGE_SIZE);
- unsigned long size = 1UL << (order + PAGE_SHIFT);
- unsigned long start = ALIGN_UP((unsigned long)xen_heap_start,
- PERCPU_PAGE_SIZE);
- unsigned long end = start + size;
-
- if (__pa(end) < end_in_pa) {
- init_boot_pages(__pa(xen_heap_start), __pa(start));
- xen_heap_start = (void*)end;
- percpu_area = (void*)virt_to_xenva(start);
- printk("allocate percpu area 0x%lx@0x%lx 0x%p\n",
- size, start, percpu_area);
- } else {
- panic("can't allocate percpu area. size 0x%lx\n", size);
- }
- return xen_heap_start;
-}
-
-static void* __init
-get_per_cpu_area(void)
-{
- return percpu_area;
-}
-#endif
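
The sizing arithmetic in per_cpu_allocate() is worth seeing in
isolation; a self-contained sketch with invented constants (PAGE_SHIFT,
PERCPU_PAGE_SIZE and NR_CPUS here are placeholders, not Xen's real
values):

    #include <stdio.h>

    #define PAGE_SHIFT       14                  /* placeholder: 16K pages */
    #define PERCPU_PAGE_SIZE (1UL << 16)         /* placeholder */
    #define NR_CPUS          64                  /* placeholder */
    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    /* ceil(log2(size in pages)), like the kernel's get_order() */
    static int get_order(unsigned long size)
    {
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        int order = get_order((NR_CPUS - 1) * PERCPU_PAGE_SIZE);
        unsigned long size = 1UL << (order + PAGE_SHIFT);
        unsigned long start = ALIGN_UP(0x12345678UL, PERCPU_PAGE_SIZE);

        printf("order=%d size=%#lx start=%#lx\n", order, size, start);
        return 0;
    }
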
-
-/**
- * per_cpu_init - setup per-cpu variables
- *
- * Allocate and setup per-cpu data areas.
- */
-void *
-per_cpu_init (void)
-{
- void *cpu_data;
- int cpu;
-
- /*
- * get_free_pages() cannot be used before cpu_init() is done. The BSP
- * allocates "NR_CPUS" pages for all CPUs so that an AP does not have
- * to call get_zeroed_page().
- */
- if (smp_processor_id() == 0) {
-#ifdef XEN
- void *cpu0_data = __cpu0_per_cpu;
-
- __per_cpu_offset[0] = (char *)cpu0_data - __per_cpu_start;
- per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
-
- cpu_data = get_per_cpu_area();
- if (cpu_data == NULL)
- panic("can't allocate per cpu area.\n");
-
- for (cpu = 1; cpu < NR_CPUS; cpu++) {
- memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
- __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
- cpu_data += PERCPU_PAGE_SIZE;
- per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
- }
-#else
- cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
- PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
- __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
- cpu_data += PERCPU_PAGE_SIZE;
- per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
- }
-#endif
- }
- return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-}
-#endif /* CONFIG_SMP */
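
Everything per_cpu_init() really produces is the __per_cpu_offset[]
table; a sketch of the consumer side, assuming GNU C typeof (the real
accessors live elsewhere in the tree):

    extern unsigned long __per_cpu_offset[];

    /* CPU n's copy of a per-cpu variable sits at the variable's
     * link-time address plus __per_cpu_offset[n], exactly as the
     * offsets are filled in above. */
    #define per_cpu_ptr(var, cpu) \
        ((typeof(var) *)((char *)&(var) + __per_cpu_offset[cpu]))
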
-
-#ifndef XEN
-static int
-count_pages (u64 start, u64 end, void *arg)
-{
- unsigned long *count = arg;
-
- *count += (end - start) >> PAGE_SHIFT;
- return 0;
-}
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static int
-count_dma_pages (u64 start, u64 end, void *arg)
-{
- unsigned long *count = arg;
-
- if (start < MAX_DMA_ADDRESS)
- *count += (min(end, MAX_DMA_ADDRESS) - start) >> PAGE_SHIFT;
- return 0;
-}
-#endif
-
-/*
- * Set up the page tables.
- */
-
-void
-paging_init (void)
-{
- unsigned long max_dma;
- unsigned long zones_size[MAX_NR_ZONES];
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- unsigned long zholes_size[MAX_NR_ZONES];
- unsigned long max_gap;
-#endif
-
- /* initialize mem_map[] */
-
- memset(zones_size, 0, sizeof(zones_size));
-
- num_physpages = 0;
- efi_memmap_walk(count_pages, &num_physpages);
-
- max_dma = virt_to_maddr((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- memset(zholes_size, 0, sizeof(zholes_size));
-
- num_dma_physpages = 0;
- efi_memmap_walk(count_dma_pages, &num_dma_physpages);
-
- if (max_low_pfn < max_dma) {
- zones_size[ZONE_DMA] = max_low_pfn;
- zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
- } else {
- zones_size[ZONE_DMA] = max_dma;
- zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
- if (num_physpages > num_dma_physpages) {
- zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
- zholes_size[ZONE_NORMAL] =
- ((max_low_pfn - max_dma) -
- (num_physpages - num_dma_physpages));
- }
- }
-
- max_gap = 0;
- efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
- if (max_gap < LARGE_GAP) {
- vmem_map = (struct page *) 0;
- free_area_init_node(0, &contig_page_data, zones_size, 0,
- zholes_size);
- } else {
- unsigned long map_size;
-
- /* allocate virtual_mem_map */
-
- map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
- vmalloc_end -= map_size;
- vmem_map = (struct page *) vmalloc_end;
- efi_memmap_walk(create_mem_map_page_table, NULL);
-
- NODE_DATA(0)->node_mem_map = vmem_map;
- free_area_init_node(0, &contig_page_data, zones_size,
- 0, zholes_size);
-
- printk("Virtual mem_map starts at 0x%p\n", mem_map);
- }
-#else /* !CONFIG_VIRTUAL_MEM_MAP */
- if (max_low_pfn < max_dma)
- zones_size[ZONE_DMA] = max_low_pfn;
- else {
- zones_size[ZONE_DMA] = max_dma;
- zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
- }
- free_area_init(zones_size);
-#endif /* !CONFIG_VIRTUAL_MEM_MAP */
- zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
-}
-#endif /* XEN */
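
The zholes_size arithmetic in paging_init() counts pfns spanned by a
zone minus pfns actually backed by RAM; a worked example with invented
numbers:

    static void zone_hole_example(void)
    {
        unsigned long max_dma           = 0x40000; /* pfns spanned by ZONE_DMA */
        unsigned long num_dma_physpages = 0x30000; /* pages EFI actually found */

        unsigned long zones_size_dma  = max_dma;                     /* 0x40000 */
        unsigned long zholes_size_dma = max_dma - num_dma_physpages; /* 0x10000 */

        (void)zones_size_dma;
        (void)zholes_size_dma;
    }
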
diff --git a/xen/arch/ia64/linux-xen/numa.c b/xen/arch/ia64/linux-xen/numa.c
deleted file mode 100644
index 81b5b88f01..0000000000
--- a/xen/arch/ia64/linux-xen/numa.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ia64 kernel NUMA specific stuff
- *
- * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
- * Copyright (C) 2004 Silicon Graphics, Inc.
- * Jesse Barnes <jbarnes@sgi.com>
- */
-#ifdef XEN
-#include <xen/types.h>
-#endif
-#include <linux/config.h>
-#include <linux/topology.h>
-#include <linux/module.h>
-#include <asm/processor.h>
-#include <asm/smp.h>
-#ifdef XEN
-#include <xen/nodemask.h>
-#endif
-
-#ifdef XEN
-nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
-#endif
-
-u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_to_node_map);
-
-cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
-
-/**
- * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
- *
- * Build cpu to node mapping and initialize the per node cpu masks using
- * info from the node_cpuid array handed to us by ACPI.
- */
-void __init build_cpu_to_node_map(void)
-{
- int cpu, i, node;
-
- for(node=0; node < MAX_NUMNODES; node++)
- cpumask_clear(&node_to_cpu_mask[node]);
-
- for(cpu = 0; cpu < NR_CPUS; ++cpu) {
- node = -1;
- for (i = 0; i < NR_CPUS; ++i)
- if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
- node = node_cpuid[i].nid;
- break;
- }
- cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
- if (node >= 0)
- cpumask_set_cpu(cpu, &node_to_cpu_mask[node]);
- }
-}
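
For reference, the map built here is normally consumed via
cpu_to_node(); on ia64 the topology header defines it essentially as
below (quoted from memory, so treat it as an assumption):

    #define cpu_to_node(cpu)  ((int)cpu_to_node_map[cpu])
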
diff --git a/xen/arch/ia64/linux-xen/perfmon.c b/xen/arch/ia64/linux-xen/perfmon.c
deleted file mode 100644
index 18f8be9d7f..0000000000
--- a/xen/arch/ia64/linux-xen/perfmon.c
+++ /dev/null
@@ -1,7871 +0,0 @@
-/*
- * This file implements the perfmon-2 subsystem which is used
- * to program the IA-64 Performance Monitoring Unit (PMU).
- *
- * The initial version of perfmon.c was written by
- * Ganesh Venkitachalam, IBM Corp.
- *
- * Then it was modified for perfmon-1.x by Stephane Eranian and
- * David Mosberger, Hewlett Packard Co.
- *
- * Version Perfmon-2.x is a rewrite of perfmon-1.x
- * by Stephane Eranian, Hewlett Packard Co.
- *
- * Copyright (C) 1999-2005 Hewlett Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * More information about perfmon available at:
- * http://www.hpl.hp.com/research/linux/perfmon
- *
- *
- * For Xen/IA64 xenoprof
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/sysctl.h>
-#include <linux/list.h>
-#include <linux/file.h>
-#include <linux/poll.h>
-#include <linux/vfs.h>
-#include <linux/pagemap.h>
-#include <linux/mount.h>
-#include <linux/bitops.h>
-#include <linux/capability.h>
-#include <linux/rcupdate.h>
-#include <linux/completion.h>
-
-#ifndef XEN
-#include <asm/errno.h>
-#else
-#include <xen/errno.h>
-#endif
-#include <asm/intrinsics.h>
-#include <asm/page.h>
-#include <asm/perfmon.h>
-#include <asm/processor.h>
-#include <asm/signal.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/delay.h>
-
-#ifdef XEN
-#include <xen/guest_access.h>
-#include <asm/hw_irq.h>
-#define CONFIG_PERFMON
-#define pid vcpu_id
-#define thread arch._thread
-#define task_pt_regs vcpu_regs
-
-#define PMC_USER (1UL << 3)
-#define PMC_KERNEL (1UL << 0)
-#define PMC_XEN_AND_GUEST ((1UL << 0) | (1UL << 1) | (1UL << 2))
-#define PMC_PRIV_MONITOR (1UL << 6)
-
-#undef ia64_set_pmc
-#define ia64_set_pmc(index, val) \
-do { \
- u64 __index = (index); \
- u64 __val = (val); \
- /* bad hack! \
- * At the moment Linux perfmon knows only kernel and user, \
- * so it sets only pmc.plm[0] and pmc.plm[3]. \
- * What we want instead is to sample the whole \
- * system, i.e. user, guest kernel and Xen VMM. \
- * Thus here we enable pmc.plm[2:1] too for generic pmc/pmd. \
- * \
- * But we cannot do this generically for the implementation- \
- * dependent pmc/pmd. \
- * Probably such knowledge should be taught to oprofiled or \
- * the xenified perfmon. \
- */ \
- if (pmu_conf != NULL && PMC_IS_COUNTING(__index) && \
- (__val & PMC_KERNEL)) \
- __val |= PMC_XEN_AND_GUEST | PMC_PRIV_MONITOR; \
- asm volatile ("mov pmc[%0]=%1" :: \
- "r"(__index), "r"(__val) : "memory"); \
-} while (0)
-#endif
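
Stripped of the pmu_conf/counting guards, the plm widening performed by
the overridden ia64_set_pmc() above reduces to a few lines; a hedged C
sketch using the PMC_* bits defined in this hunk:

    #include <stdint.h>

    #define PMC_KERNEL        (1UL << 0)  /* pmc.plm[0]: privilege level 0 */
    #define PMC_XEN_AND_GUEST ((1UL << 0) | (1UL << 1) | (1UL << 2))
    #define PMC_PRIV_MONITOR  (1UL << 6)  /* pmc.pm: privileged monitor */

    /* If the caller asked for kernel-only counting, widen the privilege
     * mask so the guest kernel and the VMM are sampled too. */
    static uint64_t widen_plm(uint64_t val)
    {
        if (val & PMC_KERNEL)
            val |= PMC_XEN_AND_GUEST | PMC_PRIV_MONITOR;
        return val;
    }
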
-
-#ifdef CONFIG_PERFMON
-/*
- * perfmon context state
- */
-#define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
-#define PFM_CTX_LOADED 2 /* context is loaded onto a task */
-#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
-#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
-
-#define PFM_INVALID_ACTIVATION (~0UL)
-
-/*
- * depth of message queue
- */
-#define PFM_MAX_MSGS 32
-#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
-
-/*
- * type of a PMU register (bitmask).
- * bitmask structure:
- * bit0 : register implemented
- * bit1 : end marker
- * bit2-3 : reserved
- * bit4 : pmc has pmc.pm
- * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter
- * bit6-7 : register type
- * bit8-31: reserved
- */
-#define PFM_REG_NOTIMPL 0x0 /* not implemented at all */
-#define PFM_REG_IMPL 0x1 /* register implemented */
-#define PFM_REG_END 0x2 /* end marker */
-#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
-#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */
-#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */
-#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
-#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
-
-#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
-#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
-
-#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
-
-/* i assumed unsigned */
-#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
-#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
-
-/* XXX: these assume that register i is implemented */
-#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
-#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
-#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
-#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
-
-#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
-#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
-#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
-#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
-
-#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
-#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
-
-#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
-#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
-#define PFM_CTX_TASK(h) (h)->ctx_task
-
-#define PMU_PMC_OI 5 /* position of pmc.oi bit */
-
-/* XXX: does not support more than 64 PMDs */
-#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
-#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
-
-#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
-
-#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
-#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
-#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
-#define PFM_CODE_RR 0 /* requesting code range restriction */
-#define PFM_DATA_RR 1 /* requesting data range restriction */
-
-#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
-#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
-#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
-
-#define RDEP(x) (1UL<<(x))
-
-/*
- * context protection macros
- * in SMP:
- * - we need to protect against CPU concurrency (spin_lock)
- * - we need to protect against PMU overflow interrupts (local_irq_disable)
- * in UP:
- * - we need to protect against PMU overflow interrupts (local_irq_disable)
- *
- * spin_lock_irqsave()/spin_lock_irqrestore():
- * in SMP: local_irq_disable + spin_lock
- * in UP : local_irq_disable
- *
- * spin_lock()/spin_unlock():
- * in UP : removed automatically
- * in SMP: protect against context accesses from other CPU. interrupts
- * are not masked. This is useful for the PMU interrupt handler
- * because we know we will not get PMU concurrency in that code.
- */
-#define PROTECT_CTX(c, f) \
- do { \
- DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
- spin_lock_irqsave(&(c)->ctx_lock, f); \
- DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
- } while(0)
-
-#define UNPROTECT_CTX(c, f) \
- do { \
- DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
- spin_unlock_irqrestore(&(c)->ctx_lock, f); \
- } while(0)
-
-#define PROTECT_CTX_NOPRINT(c, f) \
- do { \
- spin_lock_irqsave(&(c)->ctx_lock, f); \
- } while(0)
-
-
-#define UNPROTECT_CTX_NOPRINT(c, f) \
- do { \
- spin_unlock_irqrestore(&(c)->ctx_lock, f); \
- } while(0)
-
-
-#define PROTECT_CTX_NOIRQ(c) \
- do { \
- spin_lock(&(c)->ctx_lock); \
- } while(0)
-
-#define UNPROTECT_CTX_NOIRQ(c) \
- do { \
- spin_unlock(&(c)->ctx_lock); \
- } while(0)
-
-
-#ifdef CONFIG_SMP
-
-#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
-#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
-#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
-
-#else /* !CONFIG_SMP */
-#define SET_ACTIVATION(t) do {} while(0)
-#define GET_ACTIVATION(t) do {} while(0)
-#define INC_ACTIVATION(t) do {} while(0)
-#endif /* CONFIG_SMP */
-
-#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
-#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
-#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
-
-#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
-#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
-
-#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
-
-/*
- * cmp0 must be the value of pmc0
- */
-#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
-
-#define PFMFS_MAGIC 0xa0b4d889
-
-/*
- * debugging
- */
-#define PFM_DEBUGGING 1
-#ifdef PFM_DEBUGGING
-#define DPRINT(a) \
- do { \
- if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
- } while (0)
-
-#define DPRINT_ovfl(a) \
- do { \
- if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
- } while (0)
-#endif
-
-/*
- * 64-bit software counter structure
- *
- * the next_reset_type is applied to the next call to pfm_reset_regs()
- */
-typedef struct {
- unsigned long val; /* virtual 64bit counter value */
- unsigned long lval; /* last reset value */
- unsigned long long_reset; /* reset value on sampling overflow */
- unsigned long short_reset; /* reset value on overflow */
- unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */
- unsigned long smpl_pmds[4]; /* which pmds are accessed when counter overflow */
- unsigned long seed; /* seed for random-number generator */
- unsigned long mask; /* mask for random-number generator */
- unsigned int flags; /* notify/do not notify */
- unsigned long eventid; /* overflow event identifier */
-} pfm_counter_t;
-
-/*
- * context flags
- */
-typedef struct {
- unsigned int block:1; /* when 1, task will block on user notifications */
- unsigned int system:1; /* do system wide monitoring */
- unsigned int using_dbreg:1; /* using range restrictions (debug registers) */
- unsigned int is_sampling:1; /* true if using a custom format */
- unsigned int excl_idle:1; /* exclude idle task in system wide session */
- unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */
- unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */
- unsigned int no_msg:1; /* no message sent on overflow */
- unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */
- unsigned int reserved:22;
-} pfm_context_flags_t;
-
-#define PFM_TRAP_REASON_NONE 0x0 /* default value */
-#define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */
-#define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */
-
-
-/*
- * perfmon context: encapsulates all the state of a monitoring session
- */
-
-typedef struct pfm_context {
- spinlock_t ctx_lock; /* context protection */
-
- pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */
- unsigned int ctx_state; /* state: active/inactive (no bitfield) */
-
- struct task_struct *ctx_task; /* task to which context is attached */
-
- unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
-
-#ifndef XEN
- struct completion ctx_restart_done; /* use for blocking notification mode */
-#endif
-
- unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
- unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
- unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */
-
- unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */
- unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
- unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
-
- unsigned long ctx_pmcs[IA64_NUM_PMC_REGS]; /* saved copies of PMC values */
-
- unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
- unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
- unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
- unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
-
- pfm_counter_t ctx_pmds[IA64_NUM_PMD_REGS]; /* software state for PMDS */
-
- u64 ctx_saved_psr_up; /* only contains psr.up value */
-
- unsigned long ctx_last_activation; /* context last activation number for last_cpu */
- unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
- unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */
-
- int ctx_fd; /* file descriptor used by this context */
- pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */
-
- pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */
- void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */
- unsigned long ctx_smpl_size; /* size of sampling buffer */
- void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
-
-#ifndef XEN
- wait_queue_head_t ctx_msgq_wait;
- pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
- int ctx_msgq_head;
- int ctx_msgq_tail;
- struct fasync_struct *ctx_async_queue;
-
- wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
-#endif
-} pfm_context_t;
-
-/*
- * magic number used to verify that structure is really
- * a perfmon context
- */
-#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
-
-#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
-
-#ifdef CONFIG_SMP
-#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
-#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
-#else
-#define SET_LAST_CPU(ctx, v) do {} while(0)
-#define GET_LAST_CPU(ctx) do {} while(0)
-#endif
-
-
-#define ctx_fl_block ctx_flags.block
-#define ctx_fl_system ctx_flags.system
-#define ctx_fl_using_dbreg ctx_flags.using_dbreg
-#define ctx_fl_is_sampling ctx_flags.is_sampling
-#define ctx_fl_excl_idle ctx_flags.excl_idle
-#define ctx_fl_going_zombie ctx_flags.going_zombie
-#define ctx_fl_trap_reason ctx_flags.trap_reason
-#define ctx_fl_no_msg ctx_flags.no_msg
-#define ctx_fl_can_restart ctx_flags.can_restart
-
-#define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0)
-#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
-
-/*
- * global information about all sessions
- * mostly used to synchronize between system wide and per-process
- */
-typedef struct {
- spinlock_t pfs_lock; /* lock the structure */
-
- unsigned int pfs_task_sessions; /* number of per task sessions */
- unsigned int pfs_sys_sessions; /* number of per system wide sessions */
- unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
- unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
- struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
-#ifdef XEN
-#define XENOPROF_TASK ((struct task_struct*)1)
-#endif
-} pfm_session_t;
-
-/*
- * information about a PMC or PMD.
- * dep_pmd[]: a bitmask of dependent PMD registers
- * dep_pmc[]: a bitmask of dependent PMC registers
- */
-typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-typedef struct {
- unsigned int type;
- int pm_pos;
- unsigned long default_value; /* power-on default value */
- unsigned long reserved_mask; /* bitmask of reserved bits */
- pfm_reg_check_t read_check;
- pfm_reg_check_t write_check;
- unsigned long dep_pmd[4];
- unsigned long dep_pmc[4];
-} pfm_reg_desc_t;
-
-/* assume cnum is a valid monitor */
-#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
-
-/*
- * This structure is initialized at boot time and contains
- * a description of the PMU main characteristics.
- *
- * If the probe function is defined, detection is based
- * on its return value:
- * - 0 means recognized PMU
- * - anything else means not supported
- * When the probe function is not defined, then the pmu_family field
- * is used and it must match the host CPU family such that:
- * - cpu->family & config->pmu_family != 0
- */
-typedef struct {
- unsigned long ovfl_val; /* overflow value for counters */
-
- pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */
- pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */
-
- unsigned int num_pmcs; /* number of PMCS: computed at init time */
- unsigned int num_pmds; /* number of PMDS: computed at init time */
- unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
- unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
-
- char *pmu_name; /* PMU family name */
- unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
- unsigned int flags; /* pmu specific flags */
- unsigned int num_ibrs; /* number of IBRS: computed at init time */
- unsigned int num_dbrs; /* number of DBRS: computed at init time */
- unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
- int (*probe)(void); /* customized probe routine */
- unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
-} pmu_config_t;
-/*
- * PMU specific flags
- */
-#define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */
-
-/*
- * debug register related type definitions
- */
-typedef struct {
- unsigned long ibr_mask:56;
- unsigned long ibr_plm:4;
- unsigned long ibr_ig:3;
- unsigned long ibr_x:1;
-} ibr_mask_reg_t;
-
-typedef struct {
- unsigned long dbr_mask:56;
- unsigned long dbr_plm:4;
- unsigned long dbr_ig:2;
- unsigned long dbr_w:1;
- unsigned long dbr_r:1;
-} dbr_mask_reg_t;
-
-typedef union {
- unsigned long val;
- ibr_mask_reg_t ibr;
- dbr_mask_reg_t dbr;
-} dbreg_t;
-
-
-/*
- * perfmon command descriptions
- */
-typedef struct {
- int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
- char *cmd_name;
- int cmd_flags;
- unsigned int cmd_narg;
- size_t cmd_argsize;
- int (*cmd_getsize)(void *arg, size_t *sz);
-} pfm_cmd_desc_t;
-
-#define PFM_CMD_FD 0x01 /* command requires a file descriptor */
-#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */
-#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */
-#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */
-
-
-#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
-#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
-#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
-#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
-#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
-
-#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
-
-typedef struct {
- unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
- unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
- unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
- unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */
- unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */
- unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */
- unsigned long pfm_smpl_handler_calls;
- unsigned long pfm_smpl_handler_cycles;
- char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
-} pfm_stats_t;
-
-/*
- * perfmon internal variables
- */
-static pfm_stats_t pfm_stats[NR_CPUS];
-static pfm_session_t pfm_sessions; /* global sessions information */
-
-#ifndef XEN
-static DEFINE_SPINLOCK(pfm_alt_install_check);
-#endif
-static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
-
-#ifndef XEN
-static struct proc_dir_entry *perfmon_dir;
-#endif
-static pfm_uuid_t pfm_null_uuid = {0,};
-
-static spinlock_t pfm_buffer_fmt_lock;
-static LIST_HEAD(pfm_buffer_fmt_list);
-
-static pmu_config_t *pmu_conf;
-
-/* sysctl() controls */
-pfm_sysctl_t pfm_sysctl;
-EXPORT_SYMBOL(pfm_sysctl);
-
-#ifndef XEN
-static ctl_table pfm_ctl_table[]={
- {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
- {2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
- {3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
- {4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
- { 0, },
-};
-static ctl_table pfm_sysctl_dir[] = {
- {1, "perfmon", NULL, 0, 0755, pfm_ctl_table, },
- {0,},
-};
-static ctl_table pfm_sysctl_root[] = {
- {1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, },
- {0,},
-};
-static struct ctl_table_header *pfm_sysctl_header;
-
-static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
-static int pfm_flush(struct file *filp);
-#endif
-
-#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
-#define pfm_get_cpu_data(a,b) per_cpu(a, b)
-
-#ifndef XEN
-static inline void
-pfm_put_task(struct task_struct *task)
-{
- if (task != current) put_task_struct(task);
-}
-
-static inline void
-pfm_set_task_notify(struct task_struct *task)
-{
- struct thread_info *info;
-
- info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
- set_bit(TIF_NOTIFY_RESUME, &info->flags);
-}
-
-static inline void
-pfm_clear_task_notify(void)
-{
- clear_thread_flag(TIF_NOTIFY_RESUME);
-}
-
-static inline void
-pfm_reserve_page(unsigned long a)
-{
- SetPageReserved(vmalloc_to_page((void *)a));
-}
-static inline void
-pfm_unreserve_page(unsigned long a)
-{
- ClearPageReserved(vmalloc_to_page((void*)a));
-}
-#endif
-
-static inline unsigned long
-pfm_protect_ctx_ctxsw(pfm_context_t *x)
-{
- spin_lock(&(x)->ctx_lock);
- return 0UL;
-}
-
-static inline void
-pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
-{
- spin_unlock(&(x)->ctx_lock);
-}
-
-#ifndef XEN
-static inline unsigned int
-pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
-{
- return do_munmap(mm, addr, len);
-}
-
-static inline unsigned long
-pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
-{
- return get_unmapped_area(file, addr, len, pgoff, flags);
-}
-
-
-static struct super_block *
-pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
-{
- return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
-}
-
-static struct file_system_type pfm_fs_type = {
- .name = "pfmfs",
- .get_sb = pfmfs_get_sb,
- .kill_sb = kill_anon_super,
-};
-#endif
-
-DEFINE_PER_CPU(unsigned long, pfm_syst_info);
-DEFINE_PER_CPU(struct task_struct *, pmu_owner);
-DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
-DEFINE_PER_CPU(unsigned long, pmu_activation_number);
-#ifndef XEN
-EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
-
-
-/* forward declaration */
-static struct file_operations pfm_file_ops;
-#endif
-
-/*
- * forward declarations
- */
-#ifndef CONFIG_SMP
-static void pfm_lazy_save_regs (struct task_struct *ta);
-#endif
-
-void dump_pmu_state(const char *);
-static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
-
-#include "perfmon_itanium.h"
-#include "perfmon_mckinley.h"
-#include "perfmon_montecito.h"
-#include "perfmon_generic.h"
-
-static pmu_config_t *pmu_confs[]={
- &pmu_conf_mont,
- &pmu_conf_mck,
- &pmu_conf_ita,
- &pmu_conf_gen, /* must be last */
- NULL
-};
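
The probe/pmu_family detection rules documented above imply a scan over
pmu_confs[]; the actual loop lives outside this hunk, so the following
is only a sketch of how such a table is typically consumed:

    /* Assumed detection loop: a probe hook wins if it returns 0;
     * otherwise the family mask decides. */
    static pmu_config_t *find_pmu_conf(unsigned int cpu_family)
    {
        pmu_config_t **p;

        for (p = pmu_confs; *p; p++) {
            if ((*p)->probe) {
                if ((*p)->probe() == 0)        /* 0 means recognized PMU */
                    return *p;
            } else if ((*p)->pmu_family & cpu_family) {
                return *p;
            }
        }
        return NULL;                           /* no supported PMU found */
    }
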
-
-
-#ifndef XEN
-static int pfm_end_notify_user(pfm_context_t *ctx);
-#endif
-
-static inline void
-pfm_clear_psr_pp(void)
-{
- ia64_rsm(IA64_PSR_PP);
- ia64_srlz_i();
-}
-
-static inline void
-pfm_set_psr_pp(void)
-{
- ia64_ssm(IA64_PSR_PP);
- ia64_srlz_i();
-}
-
-static inline void
-pfm_clear_psr_up(void)
-{
- ia64_rsm(IA64_PSR_UP);
- ia64_srlz_i();
-}
-
-static inline void
-pfm_set_psr_up(void)
-{
- ia64_ssm(IA64_PSR_UP);
- ia64_srlz_i();
-}
-
-static inline unsigned long
-pfm_get_psr(void)
-{
- unsigned long tmp;
- tmp = ia64_getreg(_IA64_REG_PSR);
- ia64_srlz_i();
- return tmp;
-}
-
-static inline void
-pfm_set_psr_l(unsigned long val)
-{
- ia64_setreg(_IA64_REG_PSR_L, val);
- ia64_srlz_i();
-}
-
-static inline void
-pfm_freeze_pmu(void)
-{
- ia64_set_pmc(0,1UL);
- ia64_srlz_d();
-}
-
-static inline void
-pfm_unfreeze_pmu(void)
-{
- ia64_set_pmc(0,0UL);
- ia64_srlz_d();
-}
-
-static inline void
-pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
-{
- int i;
-
- for (i=0; i < nibrs; i++) {
- ia64_set_ibr(i, ibrs[i]);
- ia64_dv_serialize_instruction();
- }
- ia64_srlz_i();
-}
-
-static inline void
-pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
-{
- int i;
-
- for (i=0; i < ndbrs; i++) {
- ia64_set_dbr(i, dbrs[i]);
- ia64_dv_serialize_data();
- }
- ia64_srlz_d();
-}
-
-/*
- * PMD[i] must be a counter. no check is made
- */
-static inline unsigned long
-pfm_read_soft_counter(pfm_context_t *ctx, int i)
-{
- return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
-}
-
-/*
- * PMD[i] must be a counter. no check is made
- */
-static inline void
-pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
-{
- unsigned long ovfl_val = pmu_conf->ovfl_val;
-
- ctx->ctx_pmds[i].val = val & ~ovfl_val;
- /*
- * writes to the unimplemented part are ignored, so we do not need
- * to mask off the top part
- */
- ia64_set_pmd(i, val & ovfl_val);
-}
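
The soft/hard split used by pfm_read_soft_counter() and
pfm_write_soft_counter() partitions one 64-bit value on the ovfl_val
boundary; a runnable example with an invented 47-bit counter width:

    #include <stdio.h>

    int main(void)
    {
        unsigned long ovfl_val = (1UL << 47) - 1;  /* assumption: 47-bit PMD */
        unsigned long val      = 0x0000900000001234UL;

        unsigned long soft = val & ~ovfl_val; /* stays in ctx_pmds[i].val */
        unsigned long hw   = val &  ovfl_val; /* written to the real PMD  */

        /* reading back: soft + (pmd & ovfl_val) reconstructs val */
        printf("soft=%#lx hw=%#lx sum=%#lx\n", soft, hw, soft + hw);
        return 0;
    }
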
-
-#ifndef XEN
-static pfm_msg_t *
-pfm_get_new_msg(pfm_context_t *ctx)
-{
- int idx, next;
-
- next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
-
- DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
- if (next == ctx->ctx_msgq_head) return NULL;
-
- idx = ctx->ctx_msgq_tail;
- ctx->ctx_msgq_tail = next;
-
- DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
-
- return ctx->ctx_msgq+idx;
-}
-
-static pfm_msg_t *
-pfm_get_next_msg(pfm_context_t *ctx)
-{
- pfm_msg_t *msg;
-
- DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
-
- if (PFM_CTXQ_EMPTY(ctx)) return NULL;
-
- /*
- * get oldest message
- */
- msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
-
- /*
- * and move forward
- */
- ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
-
- DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
-
- return msg;
-}
-
-static void
-pfm_reset_msgq(pfm_context_t *ctx)
-{
- ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
- DPRINT(("ctx=%p msgq reset\n", ctx));
-}
-
-static void *
-pfm_rvmalloc(unsigned long size)
-{
- void *mem;
- unsigned long addr;
-
- size = PAGE_ALIGN(size);
- mem = vmalloc(size);
- if (mem) {
- //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
- memset(mem, 0, size);
- addr = (unsigned long)mem;
- while (size > 0) {
- pfm_reserve_page(addr);
- addr+=PAGE_SIZE;
- size-=PAGE_SIZE;
- }
- }
- return mem;
-}
-
-static void
-pfm_rvfree(void *mem, unsigned long size)
-{
- unsigned long addr;
-
- if (mem) {
- DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
- addr = (unsigned long) mem;
- while ((long) size > 0) {
- pfm_unreserve_page(addr);
- addr+=PAGE_SIZE;
- size-=PAGE_SIZE;
- }
- vfree(mem);
- }
- return;
-}
-#endif
-
-static pfm_context_t *
-pfm_context_alloc(void)
-{
- pfm_context_t *ctx;
-
- /*
- * allocate context descriptor
- * must be able to free with interrupts disabled
- */
- ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
- if (ctx) {
- memset(ctx, 0, sizeof(pfm_context_t));
- DPRINT(("alloc ctx @%p\n", ctx));
- }
- return ctx;
-}
-
-static void
-pfm_context_free(pfm_context_t *ctx)
-{
- if (ctx) {
- DPRINT(("free ctx @%p\n", ctx));
- kfree(ctx);
- }
-}
-
-#ifndef XEN
-static void
-pfm_mask_monitoring(struct task_struct *task)
-{
- pfm_context_t *ctx = PFM_GET_CTX(task);
- struct thread_struct *th = &task->thread;
- unsigned long mask, val, ovfl_mask;
- int i;
-
- DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
-
- ovfl_mask = pmu_conf->ovfl_val;
- /*
- * monitoring can only be masked as a result of a valid
- * counter overflow. In UP, it means that the PMU still
- * has an owner. Note that the owner can be different
- * from the current task. However the PMU state belongs
- * to the owner.
- * In SMP, a valid overflow only happens when task is
- * current. Therefore if we come here, we know that
- * the PMU state belongs to the current task, therefore
- * we can access the live registers.
- *
- * So in both cases, the live register contains the owner's
- * state. We can ONLY touch the PMU registers and NOT the PSR.
- *
- * As a consequence of this call, the thread->pmds[] array
- * contains stale information which must be ignored
- * when context is reloaded AND monitoring is active (see
- * pfm_restart).
- */
- mask = ctx->ctx_used_pmds[0];
- for (i = 0; mask; i++, mask>>=1) {
- /* skip non used pmds */
- if ((mask & 0x1) == 0) continue;
- val = ia64_get_pmd(i);
-
- if (PMD_IS_COUNTING(i)) {
- /*
- * we rebuild the full 64 bit value of the counter
- */
- ctx->ctx_pmds[i].val += (val & ovfl_mask);
- } else {
- ctx->ctx_pmds[i].val = val;
- }
- DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
- i,
- ctx->ctx_pmds[i].val,
- val & ovfl_mask));
- }
- /*
- * mask monitoring by setting the privilege level to 0
- * we cannot use psr.pp/psr.up for this, it is controlled by
- * the user
- *
- * if task is current, modify actual registers, otherwise modify
- * thread save state, i.e., what will be restored in pfm_load_regs()
- */
- mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
- for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
- if ((mask & 0x1) == 0UL) continue;
- ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
- th->pmcs[i] &= ~0xfUL;
- DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i]));
- }
- /*
- * make all of this visible
- */
- ia64_srlz_d();
-}
-
-/*
- * must always be done with task == current
- *
- * context must be in MASKED state when calling
- */
-static void
-pfm_restore_monitoring(struct task_struct *task)
-{
- pfm_context_t *ctx = PFM_GET_CTX(task);
- struct thread_struct *th = &task->thread;
- unsigned long mask, ovfl_mask;
- unsigned long psr, val;
- int i, is_system;
-
- is_system = ctx->ctx_fl_system;
- ovfl_mask = pmu_conf->ovfl_val;
-
- if (task != current) {
- printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
- return;
- }
- if (ctx->ctx_state != PFM_CTX_MASKED) {
- printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
- task->pid, current->pid, ctx->ctx_state);
- return;
- }
- psr = pfm_get_psr();
- /*
- * monitoring is masked via the PMC.
- * As we restore their value, we do not want each counter to
- * restart right away. We stop monitoring using the PSR,
- * restore the PMC (and PMD) and then re-establish the psr
- * as it was. Note that there can be no pending overflow at
- * this point, because monitoring was MASKED.
- *
- * system-wide session are pinned and self-monitoring
- */
- if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
- /* disable dcr pp */
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
- pfm_clear_psr_pp();
- } else {
- pfm_clear_psr_up();
- }
- /*
- * first, we restore the PMD
- */
- mask = ctx->ctx_used_pmds[0];
- for (i = 0; mask; i++, mask>>=1) {
- /* skip non used pmds */
- if ((mask & 0x1) == 0) continue;
-
- if (PMD_IS_COUNTING(i)) {
- /*
- * we split the 64bit value according to
- * counter width
- */
- val = ctx->ctx_pmds[i].val & ovfl_mask;
- ctx->ctx_pmds[i].val &= ~ovfl_mask;
- } else {
- val = ctx->ctx_pmds[i].val;
- }
- ia64_set_pmd(i, val);
-
- DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
- i,
- ctx->ctx_pmds[i].val,
- val));
- }
- /*
- * restore the PMCs
- */
- mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
- for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
- if ((mask & 0x1) == 0UL) continue;
- th->pmcs[i] = ctx->ctx_pmcs[i];
- ia64_set_pmc(i, th->pmcs[i]);
- DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
- }
- ia64_srlz_d();
-
- /*
- * must restore DBR/IBR because could be modified while masked
- * XXX: need to optimize
- */
- if (ctx->ctx_fl_using_dbreg) {
- pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
- pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
- }
-
- /*
- * now restore PSR
- */
- if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
- /* enable dcr pp */
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
- ia64_srlz_i();
- }
- pfm_set_psr_l(psr);
-}
-#endif
-
-static inline void
-pfm_save_pmds(unsigned long *pmds, unsigned long mask)
-{
- int i;
-
- ia64_srlz_d();
-
- for (i=0; mask; i++, mask>>=1) {
- if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
- }
-}
-
-#ifndef XEN
-/*
- * reload from thread state (used for ctxsw in only)
- */
-static inline void
-pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
-{
- int i;
- unsigned long val, ovfl_val = pmu_conf->ovfl_val;
-
- for (i=0; mask; i++, mask>>=1) {
- if ((mask & 0x1) == 0) continue;
- val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
- ia64_set_pmd(i, val);
- }
- ia64_srlz_d();
-}
-
-/*
- * propagate PMD from context to thread-state
- */
-static inline void
-pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
-{
- struct thread_struct *thread = &task->thread;
- unsigned long ovfl_val = pmu_conf->ovfl_val;
- unsigned long mask = ctx->ctx_all_pmds[0];
- unsigned long val;
- int i;
-
- DPRINT(("mask=0x%lx\n", mask));
-
- for (i=0; mask; i++, mask>>=1) {
-
- val = ctx->ctx_pmds[i].val;
-
- /*
- * We break up the 64 bit value into 2 pieces
- * the lower bits go to the machine state in the
- * thread (will be reloaded on ctxsw in).
- * The upper part stays in the soft-counter.
- */
- if (PMD_IS_COUNTING(i)) {
- ctx->ctx_pmds[i].val = val & ~ovfl_val;
- val &= ovfl_val;
- }
- thread->pmds[i] = val;
-
- DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
- i,
- thread->pmds[i],
- ctx->ctx_pmds[i].val));
- }
-}
-#else
-static inline void
-xenpfm_restore_pmds(pfm_context_t* ctx)
-{
- int i;
- unsigned long ovfl_val = pmu_conf->ovfl_val;
- unsigned long mask = ctx->ctx_all_pmds[0];
- unsigned long val;
-
- for (i = 0; mask; i++, mask >>= 1) {
- if ((mask & 0x1) == 0)
- continue;
-
- val = ctx->ctx_pmds[i].val;
- /*
- * We break up the 64 bit value into 2 pieces
- * the lower bits go to the machine state in the
- * thread (will be reloaded on ctxsw in).
- * The upper part stays in the soft-counter.
- */
- if (PMD_IS_COUNTING(i)) {
- ctx->ctx_pmds[i].val = val & ~ovfl_val;
- val &= ovfl_val;
- }
- ia64_set_pmd(i, val);
- }
- ia64_srlz_d();
-}
-#endif
-
-#ifndef XEN
-/*
- * propagate PMC from context to thread-state
- */
-static inline void
-pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
-{
- struct thread_struct *thread = &task->thread;
- unsigned long mask = ctx->ctx_all_pmcs[0];
- int i;
-
- DPRINT(("mask=0x%lx\n", mask));
-
- for (i=0; mask; i++, mask>>=1) {
- /* masking 0 with ovfl_val yields 0 */
- thread->pmcs[i] = ctx->ctx_pmcs[i];
- DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
- }
-}
-
-
-
-static inline void
-pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
-{
- int i;
-
- for (i=0; mask; i++, mask>>=1) {
- if ((mask & 0x1) == 0) continue;
- ia64_set_pmc(i, pmcs[i]);
- }
- ia64_srlz_d();
-}
-#else
-static inline void
-xenpfm_restore_pmcs(pfm_context_t* ctx)
-{
- int i;
- unsigned long mask = ctx->ctx_all_pmcs[0];
-
- for (i = 0; mask; i++, mask >>= 1) {
- if ((mask & 0x1) == 0)
- continue;
- ia64_set_pmc(i, ctx->ctx_pmcs[i]);
- DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
- }
- ia64_srlz_d();
-
-}
-#endif
-
-static inline int
-pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
-{
- return memcmp(a, b, sizeof(pfm_uuid_t));
-}
-
-static inline int
-pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
-{
- int ret = 0;
- if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
- return ret;
-}
-
-static inline int
-pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
-{
- int ret = 0;
- if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
- return ret;
-}
-
-
-static inline int
-pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
- int cpu, void *arg)
-{
- int ret = 0;
- if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
- return ret;
-}
-
-static inline int
-pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
- int cpu, void *arg)
-{
- int ret = 0;
- if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
- return ret;
-}
-
-static inline int
-pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
-{
- int ret = 0;
- if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
- return ret;
-}
-
-static inline int
-pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
-{
- int ret = 0;
- if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
- return ret;
-}
-
-static pfm_buffer_fmt_t *
-__pfm_find_buffer_fmt(pfm_uuid_t uuid)
-{
- struct list_head * pos;
- pfm_buffer_fmt_t * entry;
-
- list_for_each(pos, &pfm_buffer_fmt_list) {
- entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
- if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
- return entry;
- }
- return NULL;
-}
-
-/*
- * find a buffer format based on its uuid
- */
-static pfm_buffer_fmt_t *
-pfm_find_buffer_fmt(pfm_uuid_t uuid)
-{
- pfm_buffer_fmt_t * fmt;
- spin_lock(&pfm_buffer_fmt_lock);
- fmt = __pfm_find_buffer_fmt(uuid);
- spin_unlock(&pfm_buffer_fmt_lock);
- return fmt;
-}
-
-int
-pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
-{
- int ret = 0;
-
- /* some sanity checks */
- if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
-
- /* we need at least a handler */
- if (fmt->fmt_handler == NULL) return -EINVAL;
-
- /*
-	 * XXX: need to check validity of fmt_arg_size
- */
-
- spin_lock(&pfm_buffer_fmt_lock);
-
- if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
- printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
- ret = -EBUSY;
- goto out;
- }
- list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
- printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
-
-out:
- spin_unlock(&pfm_buffer_fmt_lock);
- return ret;
-}
-EXPORT_SYMBOL(pfm_register_buffer_fmt);
-
-int
-pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
-{
- pfm_buffer_fmt_t *fmt;
- int ret = 0;
-
- spin_lock(&pfm_buffer_fmt_lock);
-
- fmt = __pfm_find_buffer_fmt(uuid);
- if (!fmt) {
- printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
- ret = -EINVAL;
- goto out;
- }
- list_del_init(&fmt->fmt_list);
- printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
-
-out:
- spin_unlock(&pfm_buffer_fmt_lock);
- return ret;
-
-}
-EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
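-
-/*
- * Illustrative usage sketch, not part of the original file (the
- * fmt_handler signature is not visible in this excerpt, so the
- * handler itself is elided; all names below are hypothetical).  A
- * module registers a format once and the UUID becomes its lookup key:
- *
- *	static pfm_buffer_fmt_t example_fmt = {
- *		.fmt_name    = "example-fmt",
- *		.fmt_handler = example_handler,	(required, else -EINVAL)
- *	};
- *
- *	memcpy(example_fmt.fmt_uuid, example_uuid, sizeof(pfm_uuid_t));
- *	ret = pfm_register_buffer_fmt(&example_fmt);	(0 on success)
- *	ret = pfm_register_buffer_fmt(&example_fmt);	(-EBUSY: duplicate UUID)
- *	ret = pfm_unregister_buffer_fmt(example_uuid);	(removes it again)
- */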
-
-extern void update_pal_halt_status(int);
-
-static int
-pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
-{
- unsigned long flags;
- /*
-	 * validity checks on cpu_mask have been done upstream
- */
- LOCK_PFS(flags);
-
- DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- is_syswide,
- cpu));
-
- if (is_syswide) {
- /*
- * cannot mix system wide and per-task sessions
- */
- if (pfm_sessions.pfs_task_sessions > 0UL) {
- DPRINT(("system wide not possible, %u conflicting task_sessions\n",
- pfm_sessions.pfs_task_sessions));
- goto abort;
- }
-
- if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
-
- DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
-
-#ifndef XEN
- pfm_sessions.pfs_sys_session[cpu] = task;
-#else
- pfm_sessions.pfs_sys_session[cpu] = XENOPROF_TASK;
-#endif
-
- pfm_sessions.pfs_sys_sessions++ ;
-
- } else {
- if (pfm_sessions.pfs_sys_sessions) goto abort;
- pfm_sessions.pfs_task_sessions++;
- }
-
- DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- is_syswide,
- cpu));
-
- /*
- * disable default_idle() to go to PAL_HALT
- */
- update_pal_halt_status(0);
-
- UNLOCK_PFS(flags);
-
- return 0;
-
-error_conflict:
- DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
-#ifndef XEN
- pfm_sessions.pfs_sys_session[cpu]->pid,
-#else
- -1,
-#endif
- cpu));
-abort:
- UNLOCK_PFS(flags);
-
- return -EBUSY;
-
-}
-
-static int
-pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
-{
- unsigned long flags;
- /*
-	 * validity checks on cpu_mask have been done upstream
- */
- LOCK_PFS(flags);
-
- DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- is_syswide,
- cpu));
-
-
- if (is_syswide) {
- pfm_sessions.pfs_sys_session[cpu] = NULL;
- /*
-		 * this would not work with perfmon and more than one bit set in cpu_mask
- */
- if (ctx && ctx->ctx_fl_using_dbreg) {
- if (pfm_sessions.pfs_sys_use_dbregs == 0) {
- printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
- } else {
- pfm_sessions.pfs_sys_use_dbregs--;
- }
- }
- pfm_sessions.pfs_sys_sessions--;
- } else {
- pfm_sessions.pfs_task_sessions--;
- }
- DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- is_syswide,
- cpu));
-
- /*
- * if possible, enable default_idle() to go into PAL_HALT
- */
- if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
- update_pal_halt_status(1);
-
- UNLOCK_PFS(flags);
-
- return 0;
-}
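-
-/*
- * Illustrative sketch, not part of the original file: the invariants
- * enforced by pfm_reserve_session() above, restated as a predicate.
- * System-wide and per-task sessions exclude each other, and each CPU
- * can carry at most one system-wide session.
- */
-static inline int
-example_can_reserve(unsigned int sys_sessions, unsigned int task_sessions,
-		    int is_syswide, int cpu_already_claimed)
-{
-	if (is_syswide)
-		/* no per-task sessions and this CPU not yet claimed */
-		return task_sessions == 0 && !cpu_already_claimed;
-	/* per-task sessions only while no system-wide session exists */
-	return sys_sessions == 0;
-}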
-
-#ifndef XEN
-/*
- * removes virtual mapping of the sampling buffer.
- * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
- * a PROTECT_CTX() section.
- */
-static int
-pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
-{
- int r;
-
- /* sanity checks */
- if (task->mm == NULL || size == 0UL || vaddr == NULL) {
- printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
- return -EINVAL;
- }
-
- DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
-
- /*
- * does the actual unmapping
- */
- down_write(&task->mm->mmap_sem);
-
- DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
-
- r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);
-
- up_write(&task->mm->mmap_sem);
- if (r !=0) {
- printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
- }
-
- DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
-
- return 0;
-}
-#endif
-
-/*
- * free actual physical storage used by sampling buffer
- */
-#if 0
-static int
-pfm_free_smpl_buffer(pfm_context_t *ctx)
-{
- pfm_buffer_fmt_t *fmt;
-
- if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
-
- /*
- * we won't use the buffer format anymore
- */
- fmt = ctx->ctx_buf_fmt;
-
- DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
- ctx->ctx_smpl_hdr,
- ctx->ctx_smpl_size,
- ctx->ctx_smpl_vaddr));
-
- pfm_buf_fmt_exit(fmt, current, NULL, NULL);
-
- /*
- * free the buffer
- */
- pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
-
- ctx->ctx_smpl_hdr = NULL;
- ctx->ctx_smpl_size = 0UL;
-
- return 0;
-
-invalid_free:
- printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
- return -EINVAL;
-}
-#endif
-
-static inline void
-pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
-{
- if (fmt == NULL) return;
-
- pfm_buf_fmt_exit(fmt, current, NULL, NULL);
-
-}
-
-#ifndef XEN
-/*
- * pfmfs should _never_ be mounted by userland - too much of a security hassle,
- * no real gain from having the whole whorehouse mounted. So we don't need
- * any operations on the root directory. However, we need a non-trivial
- * d_name - pfm: will go nicely and kill the special-casing in procfs.
- */
-static struct vfsmount *pfmfs_mnt;
-
-static int __init
-init_pfm_fs(void)
-{
- int err = register_filesystem(&pfm_fs_type);
- if (!err) {
- pfmfs_mnt = kern_mount(&pfm_fs_type);
- err = PTR_ERR(pfmfs_mnt);
- if (IS_ERR(pfmfs_mnt))
- unregister_filesystem(&pfm_fs_type);
- else
- err = 0;
- }
- return err;
-}
-
-static void __exit
-exit_pfm_fs(void)
-{
- unregister_filesystem(&pfm_fs_type);
- mntput(pfmfs_mnt);
-}
-
-static ssize_t
-pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
-{
- pfm_context_t *ctx;
- pfm_msg_t *msg;
- ssize_t ret;
- unsigned long flags;
- DECLARE_WAITQUEUE(wait, current);
- if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", current->pid);
- return -EINVAL;
- }
-
- ctx = (pfm_context_t *)filp->private_data;
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
- return -EINVAL;
- }
-
- /*
- * check even when there is no message
- */
- if (size < sizeof(pfm_msg_t)) {
- DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
- return -EINVAL;
- }
-
- PROTECT_CTX(ctx, flags);
-
- /*
- * put ourselves on the wait queue
- */
- add_wait_queue(&ctx->ctx_msgq_wait, &wait);
-
-
- for(;;) {
- /*
- * check wait queue
- */
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
-
- ret = 0;
- if(PFM_CTXQ_EMPTY(ctx) == 0) break;
-
- UNPROTECT_CTX(ctx, flags);
-
- /*
- * check non-blocking read
- */
- ret = -EAGAIN;
- if(filp->f_flags & O_NONBLOCK) break;
-
- /*
- * check pending signals
- */
- if(signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- /*
- * no message, so wait
- */
- schedule();
-
- PROTECT_CTX(ctx, flags);
- }
- DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
-
- if (ret < 0) goto abort;
-
- ret = -EINVAL;
- msg = pfm_get_next_msg(ctx);
- if (msg == NULL) {
- printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
- goto abort_locked;
- }
-
- DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
-
- ret = -EFAULT;
- if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
-
-abort_locked:
- UNPROTECT_CTX(ctx, flags);
-abort:
- return ret;
-}
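-
-/*
- * Illustrative sketch, not part of the original file: the blocking
- * loop in pfm_read() is the classic prepare-to-wait pattern.  The
- * task marks itself TASK_INTERRUPTIBLE *before* re-checking the
- * condition, so a wakeup racing with the check simply resets the
- * state to TASK_RUNNING and the later schedule() returns at once --
- * the wakeup cannot be lost.  A reduced version with a plain flag
- * standing in for the message-queue check (names hypothetical):
- */
-static int
-example_wait_for_flag(pfm_context_t *ctx, int *flag, wait_queue_head_t *q)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	unsigned long flags;
-	int ret = 0;
-
-	PROTECT_CTX(ctx, flags);
-	add_wait_queue(q, &wait);
-	for (;;) {
-		/* mark sleeping BEFORE the check: no lost wakeups */
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (*flag)
-			break;			/* condition seen under lock */
-		UNPROTECT_CTX(ctx, flags);
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			PROTECT_CTX(ctx, flags);
-			break;
-		}
-		schedule();			/* sleep until woken */
-		PROTECT_CTX(ctx, flags);
-	}
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(q, &wait);
-	UNPROTECT_CTX(ctx, flags);
-	return ret;
-}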
-
-static ssize_t
-pfm_write(struct file *file, const char __user *ubuf,
- size_t size, loff_t *ppos)
-{
- DPRINT(("pfm_write called\n"));
- return -EINVAL;
-}
-
-static unsigned int
-pfm_poll(struct file *filp, poll_table * wait)
-{
- pfm_context_t *ctx;
- unsigned long flags;
- unsigned int mask = 0;
-
- if (PFM_IS_FILE(filp) == 0) {
- printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
- return 0;
- }
-
- ctx = (pfm_context_t *)filp->private_data;
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
- return 0;
- }
-
-
- DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
-
- poll_wait(filp, &ctx->ctx_msgq_wait, wait);
-
- PROTECT_CTX(ctx, flags);
-
- if (PFM_CTXQ_EMPTY(ctx) == 0)
- mask = POLLIN | POLLRDNORM;
-
- UNPROTECT_CTX(ctx, flags);
-
- DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
-
- return mask;
-}
-
-static int
-pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
-{
- DPRINT(("pfm_ioctl called\n"));
- return -EINVAL;
-}
-
-/*
- * interrupt cannot be masked when coming here
- */
-static inline int
-pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
-{
- int ret;
-
- ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
-
- DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
- current->pid,
- fd,
- on,
- ctx->ctx_async_queue, ret));
-
- return ret;
-}
-
-static int
-pfm_fasync(int fd, struct file *filp, int on)
-{
- pfm_context_t *ctx;
- int ret;
-
- if (PFM_IS_FILE(filp) == 0) {
- printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
- return -EBADF;
- }
-
- ctx = (pfm_context_t *)filp->private_data;
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
- return -EBADF;
- }
- /*
-	 * we cannot mask interrupts during this call because it
-	 * may go to sleep if memory is not readily available.
-	 *
-	 * We are protected from the context disappearing by the get_fd()/put_fd()
-	 * done in the caller. Serialization of this function is ensured by the caller.
- */
- ret = pfm_do_fasync(fd, filp, ctx, on);
-
-
- DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
- fd,
- on,
- ctx->ctx_async_queue, ret));
-
- return ret;
-}
-
-#ifdef CONFIG_SMP
-/*
- * this function is exclusively called from pfm_close().
- * The context is not protected at that time, nor are interrupts
- * on the remote CPU. That's necessary to avoid deadlocks.
- */
-static void
-pfm_syswide_force_stop(void *info)
-{
- pfm_context_t *ctx = (pfm_context_t *)info;
- struct pt_regs *regs = task_pt_regs(current);
- struct task_struct *owner;
- unsigned long flags;
- int ret;
-
- if (ctx->ctx_cpu != smp_processor_id()) {
- printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
- ctx->ctx_cpu,
- smp_processor_id());
- return;
- }
- owner = GET_PMU_OWNER();
- if (owner != ctx->ctx_task) {
- printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
- smp_processor_id(),
- owner->pid, ctx->ctx_task->pid);
- return;
- }
- if (GET_PMU_CTX() != ctx) {
- printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
- smp_processor_id(),
- GET_PMU_CTX(), ctx);
- return;
- }
-
- DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
- /*
- * the context is already protected in pfm_close(), we simply
- * need to mask interrupts to avoid a PMU interrupt race on
- * this CPU
- */
- local_irq_save(flags);
-
- ret = pfm_context_unload(ctx, NULL, 0, regs);
- if (ret) {
- DPRINT(("context_unload returned %d\n", ret));
- }
-
- /*
- * unmask interrupts, PMU interrupts are now spurious here
- */
- local_irq_restore(flags);
-}
-
-static void
-pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
-{
- int ret;
-
- DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
- ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
- DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
-}
-#endif /* CONFIG_SMP */
-
-/*
- * called for each close(). Partially free resources.
- * When caller is self-monitoring, the context is unloaded.
- */
-static int
-pfm_flush(struct file *filp)
-{
- pfm_context_t *ctx;
- struct task_struct *task;
- struct pt_regs *regs;
- unsigned long flags;
- unsigned long smpl_buf_size = 0UL;
- void *smpl_buf_vaddr = NULL;
- int state, is_system;
-
- if (PFM_IS_FILE(filp) == 0) {
-		DPRINT(("bad magic\n"));
- return -EBADF;
- }
-
- ctx = (pfm_context_t *)filp->private_data;
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
- return -EBADF;
- }
-
- /*
- * remove our file from the async queue, if we use this mode.
- * This can be done without the context being protected. We come
-	 * here when the context has become unreachable by other tasks.
- *
- * We may still have active monitoring at this point and we may
- * end up in pfm_overflow_handler(). However, fasync_helper()
- * operates with interrupts disabled and it cleans up the
- * queue. If the PMU handler is called prior to entering
- * fasync_helper() then it will send a signal. If it is
- * invoked after, it will find an empty queue and no
-	 * signal will be sent. In both cases, we are safe
- */
- if (filp->f_flags & FASYNC) {
- DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
- pfm_do_fasync (-1, filp, ctx, 0);
- }
-
- PROTECT_CTX(ctx, flags);
-
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-
- task = PFM_CTX_TASK(ctx);
- regs = task_pt_regs(task);
-
- DPRINT(("ctx_state=%d is_current=%d\n",
- state,
- task == current ? 1 : 0));
-
- /*
- * if state == UNLOADED, then task is NULL
- */
-
- /*
- * we must stop and unload because we are losing access to the context.
- */
- if (task == current) {
-#ifdef CONFIG_SMP
- /*
- * the task IS the owner but it migrated to another CPU: that's bad
- * but we must handle this cleanly. Unfortunately, the kernel does
- * not provide a mechanism to block migration (while the context is loaded).
- *
- * We need to release the resource on the ORIGINAL cpu.
- */
- if (is_system && ctx->ctx_cpu != smp_processor_id()) {
-
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- /*
- * keep context protected but unmask interrupt for IPI
- */
- local_irq_restore(flags);
-
- pfm_syswide_cleanup_other_cpu(ctx);
-
- /*
- * restore interrupt masking
- */
- local_irq_save(flags);
-
- /*
- * context is unloaded at this point
- */
- } else
-#endif /* CONFIG_SMP */
- {
-
- DPRINT(("forcing unload\n"));
- /*
- * stop and unload, returning with state UNLOADED
- * and session unreserved.
- */
- pfm_context_unload(ctx, NULL, 0, regs);
-
- DPRINT(("ctx_state=%d\n", ctx->ctx_state));
- }
- }
-
- /*
- * remove virtual mapping, if any, for the calling task.
- * cannot reset ctx field until last user is calling close().
- *
- * ctx_smpl_vaddr must never be cleared because it is needed
- * by every task with access to the context
- *
- * When called from do_exit(), the mm context is gone already, therefore
- * mm is NULL, i.e., the VMA is already gone and we do not have to
- * do anything here
- */
- if (ctx->ctx_smpl_vaddr && current->mm) {
- smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
- smpl_buf_size = ctx->ctx_smpl_size;
- }
-
- UNPROTECT_CTX(ctx, flags);
-
- /*
- * if there was a mapping, then we systematically remove it
- * at this point. Cannot be done inside critical section
- * because some VM function reenables interrupts.
- *
- */
- if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
-
- return 0;
-}
-#endif
-/*
- * called either on explicit close() or from exit_files().
- * Only the LAST user of the file gets to this point, i.e., it is
- * called only ONCE.
- *
- * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
- * (fput()),i.e, last task to access the file. Nobody else can access the
- * file at this point.
- *
- * When called from exit_files(), the VMA has been freed because exit_mm()
- * is executed before exit_files().
- *
- * When called from exit_files(), the current task is not yet ZOMBIE but we
- * flush the PMU state to the context.
- */
-#ifndef XEN
-static int
-pfm_close(struct inode *inode, struct file *filp)
-#else
-static int
-pfm_close(pfm_context_t *ctx)
-#endif
-{
-#ifndef XEN
- pfm_context_t *ctx;
- struct task_struct *task;
- struct pt_regs *regs;
- DECLARE_WAITQUEUE(wait, current);
- unsigned long flags;
-#endif
- unsigned long smpl_buf_size = 0UL;
- void *smpl_buf_addr = NULL;
- int free_possible = 1;
- int state, is_system;
-
-#ifndef XEN
- DPRINT(("pfm_close called private=%p\n", filp->private_data));
-
- if (PFM_IS_FILE(filp) == 0) {
- DPRINT(("bad magic\n"));
- return -EBADF;
- }
-
- ctx = (pfm_context_t *)filp->private_data;
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
- return -EBADF;
- }
-
- PROTECT_CTX(ctx, flags);
-#else
- BUG_ON(!spin_is_locked(&ctx->ctx_lock));
-#endif
-
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-
-#ifndef XEN
- task = PFM_CTX_TASK(ctx);
- regs = task_pt_regs(task);
-
- DPRINT(("ctx_state=%d is_current=%d\n",
- state,
- task == current ? 1 : 0));
-
- /*
- * if task == current, then pfm_flush() unloaded the context
- */
- if (state == PFM_CTX_UNLOADED) goto doit;
-
- /*
- * context is loaded/masked and task != current, we need to
- * either force an unload or go zombie
- */
-
- /*
- * The task is currently blocked or will block after an overflow.
-	 * we must force it to wake up to get out of the
- * MASKED state and transition to the unloaded state by itself.
- *
- * This situation is only possible for per-task mode
- */
- if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
-
- /*
- * set a "partial" zombie state to be checked
- * upon return from down() in pfm_handle_work().
- *
- * We cannot use the ZOMBIE state, because it is checked
- * by pfm_load_regs() which is called upon wakeup from down().
-		 * In such a case, it would free the context and then we would
- * return to pfm_handle_work() which would access the
- * stale context. Instead, we set a flag invisible to pfm_load_regs()
- * but visible to pfm_handle_work().
- *
- * For some window of time, we have a zombie context with
- * ctx_state = MASKED and not ZOMBIE
- */
- ctx->ctx_fl_going_zombie = 1;
-
- /*
- * force task to wake up from MASKED state
- */
- complete(&ctx->ctx_restart_done);
-
- DPRINT(("waking up ctx_state=%d\n", state));
-
- /*
-		 * put ourselves to sleep waiting for the other
- * task to report completion
- *
- * the context is protected by mutex, therefore there
- * is no risk of being notified of completion before
-		 * being actually on the waitq.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&ctx->ctx_zombieq, &wait);
-
- UNPROTECT_CTX(ctx, flags);
-
- /*
- * XXX: check for signals :
- * - ok for explicit close
- * - not ok when coming from exit_files()
- */
- schedule();
-
-
- PROTECT_CTX(ctx, flags);
-
-
- remove_wait_queue(&ctx->ctx_zombieq, &wait);
- set_current_state(TASK_RUNNING);
-
- /*
- * context is unloaded at this point
- */
-		DPRINT(("after zombie wakeup ctx_state=%d\n", state));
- }
- else if (task != current) {
-#ifdef CONFIG_SMP
- /*
- * switch context to zombie state
- */
- ctx->ctx_state = PFM_CTX_ZOMBIE;
-
- DPRINT(("zombie ctx for [%d]\n", task->pid));
- /*
- * cannot free the context on the spot. deferred until
- * the task notices the ZOMBIE state
- */
- free_possible = 0;
-#else
- pfm_context_unload(ctx, NULL, 0, regs);
-#endif
- }
-#else
- /* XXX XEN */
- /* unload context */
- BUG_ON(state != PFM_CTX_UNLOADED);
-#endif
-
-#ifndef XEN
-doit:
-#endif
- /* reload state, may have changed during opening of critical section */
- state = ctx->ctx_state;
-
- /*
- * the context is still attached to a task (possibly current)
- * we cannot destroy it right now
- */
-
- /*
- * we must free the sampling buffer right here because
- * we cannot rely on it being cleaned up later by the
- * monitored task. It is not possible to free vmalloc'ed
- * memory in pfm_load_regs(). Instead, we remove the buffer
-	 * now. Should there be a subsequent PMU overflow originally
-	 * meant for sampling, it will be converted to spurious
-	 * and that's fine because the monitoring tool is gone anyway.
- */
- if (ctx->ctx_smpl_hdr) {
- smpl_buf_addr = ctx->ctx_smpl_hdr;
- smpl_buf_size = ctx->ctx_smpl_size;
- /* no more sampling */
- ctx->ctx_smpl_hdr = NULL;
- ctx->ctx_fl_is_sampling = 0;
- }
-
- DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
- state,
- free_possible,
- smpl_buf_addr,
- smpl_buf_size));
-
- if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
-
- /*
-	 * UNLOADED means that the session has already been unreserved.
- */
- if (state == PFM_CTX_ZOMBIE) {
- pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
- }
-
-#ifndef XEN
- /*
- * disconnect file descriptor from context must be done
- * before we unlock.
- */
- filp->private_data = NULL;
-
- /*
-	 * if we free on the spot, the context is now completely unreachable
-	 * from the caller's side. The monitored task side is also cut, so we
-	 * can safely free it.
- *
- * If we have a deferred free, only the caller side is disconnected.
- */
- UNPROTECT_CTX(ctx, flags);
-
- /*
- * All memory free operations (especially for vmalloc'ed memory)
- * MUST be done with interrupts ENABLED.
- */
- if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
-#else
- UNPROTECT_CTX_NOIRQ(ctx);
-#endif
-
- /*
- * return the memory used by the context
- */
- if (free_possible) pfm_context_free(ctx);
-
- return 0;
-}
-
-#ifndef XEN
-static int
-pfm_no_open(struct inode *irrelevant, struct file *dontcare)
-{
- DPRINT(("pfm_no_open called\n"));
- return -ENXIO;
-}
-
-
-
-static struct file_operations pfm_file_ops = {
- .llseek = no_llseek,
- .read = pfm_read,
- .write = pfm_write,
- .poll = pfm_poll,
- .ioctl = pfm_ioctl,
- .open = pfm_no_open, /* special open code to disallow open via /proc */
- .fasync = pfm_fasync,
- .release = pfm_close,
- .flush = pfm_flush
-};
-
-static int
-pfmfs_delete_dentry(struct dentry *dentry)
-{
- return 1;
-}
-
-static struct dentry_operations pfmfs_dentry_operations = {
- .d_delete = pfmfs_delete_dentry,
-};
-
-
-static int
-pfm_alloc_fd(struct file **cfile)
-{
- int fd, ret = 0;
- struct file *file = NULL;
- struct inode * inode;
- char name[32];
- struct qstr this;
-
- fd = get_unused_fd();
- if (fd < 0) return -ENFILE;
-
- ret = -ENFILE;
-
- file = get_empty_filp();
- if (!file) goto out;
-
- /*
- * allocate a new inode
- */
- inode = new_inode(pfmfs_mnt->mnt_sb);
- if (!inode) goto out;
-
- DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
-
- inode->i_mode = S_IFCHR|S_IRUGO;
- inode->i_uid = current->fsuid;
- inode->i_gid = current->fsgid;
-
- snprintf(name, sizeof(name), "[%lu]", inode->i_ino);
- this.name = name;
- this.len = strlen(name);
- this.hash = inode->i_ino;
-
- ret = -ENOMEM;
-
- /*
- * allocate a new dcache entry
- */
- file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
- if (!file->f_dentry) goto out;
-
- file->f_dentry->d_op = &pfmfs_dentry_operations;
-
- d_add(file->f_dentry, inode);
- file->f_vfsmnt = mntget(pfmfs_mnt);
- file->f_mapping = inode->i_mapping;
-
- file->f_op = &pfm_file_ops;
- file->f_mode = FMODE_READ;
- file->f_flags = O_RDONLY;
- file->f_pos = 0;
-
- /*
- * may have to delay until context is attached?
- */
- fd_install(fd, file);
-
- /*
- * the file structure we will use
- */
- *cfile = file;
-
- return fd;
-out:
- if (file) put_filp(file);
- put_unused_fd(fd);
- return ret;
-}
-
-static void
-pfm_free_fd(int fd, struct file *file)
-{
- struct files_struct *files = current->files;
- struct fdtable *fdt;
-
- /*
-	 * there is no fd_uninstall(), so we do it here
- */
- spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- rcu_assign_pointer(fdt->fd[fd], NULL);
- spin_unlock(&files->file_lock);
-
- if (file)
- put_filp(file);
- put_unused_fd(fd);
-}
-
-static int
-pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
-{
- DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
-
- while (size > 0) {
- unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
-
-
- if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
- return -ENOMEM;
-
- addr += PAGE_SIZE;
- buf += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- return 0;
-}
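-
-/*
- * Illustrative note, not part of the original file: the buffer comes
- * from pfm_rvmalloc(), so it is virtually contiguous but physically
- * scattered.  That is why the loop above translates (ia64_tpa) and
- * remaps one PAGE_SIZE chunk at a time instead of mapping the whole
- * range from a single starting pfn.
- */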
-#endif
-
-/*
- * allocates a sampling buffer and remaps it into the user address space of the task
- */
-static int
-pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
-{
-#ifndef XEN
- struct mm_struct *mm = task->mm;
- struct vm_area_struct *vma = NULL;
- unsigned long size;
- void *smpl_buf;
-
-
- /*
-	 * the fixed header + requested size, aligned to a page boundary
- */
- size = PAGE_ALIGN(rsize);
-
- DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
-
- /*
- * check requested size to avoid Denial-of-service attacks
- * XXX: may have to refine this test
- * Check against address space limit.
- *
- * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
- * return -ENOMEM;
- */
- if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
- return -ENOMEM;
-
- /*
- * We do the easy to undo allocations first.
- *
- * pfm_rvmalloc(), clears the buffer, so there is no leak
- */
- smpl_buf = pfm_rvmalloc(size);
- if (smpl_buf == NULL) {
- DPRINT(("Can't allocate sampling buffer\n"));
- return -ENOMEM;
- }
-
- DPRINT(("smpl_buf @%p\n", smpl_buf));
-
- /* allocate vma */
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (!vma) {
- DPRINT(("Cannot allocate vma\n"));
- goto error_kmem;
- }
- memset(vma, 0, sizeof(*vma));
-
- /*
- * partially initialize the vma for the sampling buffer
- */
- vma->vm_mm = mm;
- vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
- vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
-
- /*
- * Now we have everything we need and we can initialize
- * and connect all the data structures
- */
-
- ctx->ctx_smpl_hdr = smpl_buf;
- ctx->ctx_smpl_size = size; /* aligned size */
-
- /*
- * Let's do the difficult operations next.
- *
- * now we atomically find some area in the address space and
- * remap the buffer in it.
- */
- down_write(&task->mm->mmap_sem);
-
- /* find some free area in address space, must have mmap sem held */
- vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
- if (vma->vm_start == 0UL) {
- DPRINT(("Cannot find unmapped area for size %ld\n", size));
- up_write(&task->mm->mmap_sem);
- goto error;
- }
- vma->vm_end = vma->vm_start + size;
- vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
-
- DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
-
- /* can only be applied to current task, need to have the mm semaphore held when called */
- if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
- DPRINT(("Can't remap buffer\n"));
- up_write(&task->mm->mmap_sem);
- goto error;
- }
-
- /*
- * now insert the vma in the vm list for the process, must be
- * done with mmap lock held
- */
- insert_vm_struct(mm, vma);
-
- mm->total_vm += size >> PAGE_SHIFT;
- vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
- vma_pages(vma));
- up_write(&task->mm->mmap_sem);
-
- /*
- * keep track of user level virtual address
- */
- ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
- *(unsigned long *)user_vaddr = vma->vm_start;
-
- return 0;
-
-error:
- kmem_cache_free(vm_area_cachep, vma);
-error_kmem:
- pfm_rvfree(smpl_buf, size);
-
- return -ENOMEM;
-#else
- /* XXX */
- return 0;
-#endif
-}
-
-#ifndef XEN
-/*
- * XXX: do something better here
- */
-static int
-pfm_bad_permissions(struct task_struct *task)
-{
- /* inspired by ptrace_attach() */
- DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
- current->uid,
- current->gid,
- task->euid,
- task->suid,
- task->uid,
- task->egid,
- task->sgid));
-
- return ((current->uid != task->euid)
- || (current->uid != task->suid)
- || (current->uid != task->uid)
- || (current->gid != task->egid)
- || (current->gid != task->sgid)
- || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
-}
-#endif
-
-static int
-pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
-{
- int ctx_flags;
-
- /* valid signal */
-
- ctx_flags = pfx->ctx_flags;
-
- if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
-
- /*
- * cannot block in this mode
- */
- if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
- DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
- return -EINVAL;
- }
-	}
- /* probably more to add here */
-
- return 0;
-}
-
-static int
-pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
- unsigned int cpu, pfarg_context_t *arg)
-{
- pfm_buffer_fmt_t *fmt = NULL;
- unsigned long size = 0UL;
- void *uaddr = NULL;
- void *fmt_arg = NULL;
- int ret = 0;
-#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
-
-	/* look up the buffer format, if registered */
- fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
- if (fmt == NULL) {
- DPRINT(("[%d] cannot find buffer format\n", task->pid));
- return -EINVAL;
- }
-
- /*
- * buffer argument MUST be contiguous to pfarg_context_t
- */
- if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
-
- ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
-
- DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));
-
- if (ret) goto error;
-
- /* link buffer format and context */
- ctx->ctx_buf_fmt = fmt;
-
- /*
- * check if buffer format wants to use perfmon buffer allocation/mapping service
- */
- ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
- if (ret) goto error;
-
- if (size) {
- /*
- * buffer is always remapped into the caller's address space
- */
- ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
- if (ret) goto error;
-
- /* keep track of user address of buffer */
- arg->ctx_smpl_vaddr = uaddr;
- }
- ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
-
-error:
- return ret;
-}
-
-static void
-pfm_reset_pmu_state(pfm_context_t *ctx)
-{
- int i;
-
- /*
- * install reset values for PMC.
- */
- for (i=1; PMC_IS_LAST(i) == 0; i++) {
- if (PMC_IS_IMPL(i) == 0) continue;
- ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
- DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
- }
- /*
-	 * PMD registers are set to 0UL when the context is memset()
- */
-
- /*
-	 * On context switch restore, we must restore ALL pmc and ALL pmd even
-	 * when they are not actively used by the task. In UP, the incoming process
-	 * may otherwise pick up leftover PMC, PMD state from the previous process.
-	 * As opposed to PMD, stale PMC can cause harm to the incoming
-	 * process because they may change what is being measured.
-	 * Therefore, we must systematically reinstall the entire
-	 * PMC state. In SMP, the same thing is possible on the
-	 * same CPU but also between 2 CPUs.
-	 *
-	 * The problem with PMD is information leaking, especially
-	 * to user level when psr.sp=0.
-	 *
-	 * There is unfortunately no easy way to avoid this problem
-	 * on either UP or SMP. This definitely slows down the
-	 * pfm_load_regs() function.
- */
-
- /*
- * bitmask of all PMCs accessible to this context
- *
- * PMC0 is treated differently.
- */
- ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
-
- /*
-	 * bitmask of all PMDs that are accessible to this context
- */
- ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
-
- DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
-
- /*
- * useful in case of re-enable after disable
- */
- ctx->ctx_used_ibrs[0] = 0UL;
- ctx->ctx_used_dbrs[0] = 0UL;
-}
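-
-/*
- * Illustrative note, not part of the original file: impl_pmcs[0] & ~0x1
- * above drops bit 0 because PMC0 is the overflow status register,
- * handled by the interrupt path rather than restored from the context
- * like the other PMCs ("PMC0 is treated differently" above).
- */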
-
-#ifndef XEN
-static int
-pfm_ctx_getsize(void *arg, size_t *sz)
-{
- pfarg_context_t *req = (pfarg_context_t *)arg;
- pfm_buffer_fmt_t *fmt;
-
- *sz = 0;
-
- if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
-
- fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
- if (fmt == NULL) {
- DPRINT(("cannot find buffer format\n"));
- return -EINVAL;
- }
- /* get just enough to copy in user parameters */
- *sz = fmt->fmt_arg_size;
- DPRINT(("arg_size=%lu\n", *sz));
-
- return 0;
-}
-
-
-
-/*
- * cannot attach if :
- * - kernel task
- * - task not owned by caller
- * - task incompatible with context mode
- */
-static int
-pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
-{
- /*
-	 * no kernel tasks, and the task must be owned by the caller
- */
- if (task->mm == NULL) {
-		DPRINT(("task [%d] has no memory context (kernel thread)\n", task->pid));
- return -EPERM;
- }
- if (pfm_bad_permissions(task)) {
- DPRINT(("no permission to attach to [%d]\n", task->pid));
- return -EPERM;
- }
- /*
- * cannot block in self-monitoring mode
- */
- if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
- DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
- return -EINVAL;
- }
-
- if (task->exit_state == EXIT_ZOMBIE) {
- DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
- return -EBUSY;
- }
-
- /*
- * always ok for self
- */
- if (task == current) return 0;
-
- if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
- DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
- return -EBUSY;
- }
- /*
- * make sure the task is off any CPU
- */
- wait_task_inactive(task);
-
- /* more to come... */
-
- return 0;
-}
-
-static int
-pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
-{
- struct task_struct *p = current;
- int ret;
-
- /* XXX: need to add more checks here */
- if (pid < 2) return -EPERM;
-
- if (pid != current->pid) {
-
- read_lock(&tasklist_lock);
-
- p = find_task_by_pid(pid);
-
- /* make sure task cannot go away while we operate on it */
- if (p) get_task_struct(p);
-
- read_unlock(&tasklist_lock);
-
- if (p == NULL) return -ESRCH;
- }
-
- ret = pfm_task_incompatible(ctx, p);
- if (ret == 0) {
- *task = p;
- } else if (p != current) {
- pfm_put_task(p);
- }
- return ret;
-}
-#endif
-
-
-#ifndef XEN
-static int
-pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-#else
-static pfm_context_t*
-pfm_context_create(pfarg_context_t* req)
-#endif
-{
-#ifndef XEN
- pfarg_context_t *req = (pfarg_context_t *)arg;
- struct file *filp;
-#else
- pfm_context_t *ctx;
-#endif
- int ctx_flags;
- int ret;
-
-#ifndef XEN
- /* let's check the arguments first */
- ret = pfarg_is_sane(current, req);
- if (ret < 0) return ret;
-#endif
-
- ctx_flags = req->ctx_flags;
-
- ret = -ENOMEM;
-
- ctx = pfm_context_alloc();
- if (!ctx) goto error;
-
-#ifndef XEN
- ret = pfm_alloc_fd(&filp);
- if (ret < 0) goto error_file;
-
- req->ctx_fd = ctx->ctx_fd = ret;
-
- /*
- * attach context to file
- */
- filp->private_data = ctx;
-#endif
-
- /*
- * does the user want to sample?
- */
- if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
- ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
- if (ret) goto buffer_error;
- }
-
- /*
- * init context protection lock
- */
- spin_lock_init(&ctx->ctx_lock);
-
- /*
- * context is unloaded
- */
- ctx->ctx_state = PFM_CTX_UNLOADED;
-
- /*
- * initialization of context's flags
- */
- ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
- ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
- ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
- ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
- /*
- * will move to set properties
- * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
- */
-
-#ifndef XEN
- /*
- * init restart semaphore to locked
- */
- init_completion(&ctx->ctx_restart_done);
-#endif
-
- /*
- * activation is used in SMP only
- */
- ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
- SET_LAST_CPU(ctx, -1);
-
-#ifndef XEN
- /*
- * initialize notification message queue
- */
- ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
- init_waitqueue_head(&ctx->ctx_msgq_wait);
- init_waitqueue_head(&ctx->ctx_zombieq);
-#endif
-
- DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
- ctx,
- ctx_flags,
- ctx->ctx_fl_system,
- ctx->ctx_fl_block,
- ctx->ctx_fl_excl_idle,
- ctx->ctx_fl_no_msg,
- ctx->ctx_fd));
-
- /*
- * initialize soft PMU state
- */
- pfm_reset_pmu_state(ctx);
-
-#ifndef XEN
- return 0;
-#else
- return ctx;
-#endif
-
-buffer_error:
-#ifndef XEN
- pfm_free_fd(ctx->ctx_fd, filp);
-#endif
-
- if (ctx->ctx_buf_fmt) {
-#ifndef XEN
- pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
-#else
- pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, NULL);
-#endif
- }
-#ifndef XEN
-error_file:
-#endif
- pfm_context_free(ctx);
-
-error:
-#ifndef XEN
- return ret;
-#else
- return NULL;
-#endif
-}
-
-static inline unsigned long
-pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
-{
- unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
- unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
- extern unsigned long carta_random32 (unsigned long seed);
-
- if (reg->flags & PFM_REGFL_RANDOM) {
- new_seed = carta_random32(old_seed);
- val -= (old_seed & mask); /* counter values are negative numbers! */
- if ((mask >> 32) != 0)
- /* construct a full 64-bit random value: */
- new_seed |= carta_random32(old_seed >> 32) << 32;
- reg->seed = new_seed;
- }
- reg->lval = val;
- return val;
-}
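-
-/*
- * Illustrative worked example, not part of the original file: counters
- * count upward toward overflow, so reset values are large (negative)
- * numbers.  With PFM_REGFL_RANDOM set, a reset value of -1000, mask =
- * 0xff and (old_seed & mask) == 42, the function returns -1000 - 42 =
- * -1042: between 1000 and 1255 events will elapse before the next
- * overflow, instead of exactly 1000 every time.
- */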
-
-static void
-pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
-{
- unsigned long mask = ovfl_regs[0];
- unsigned long reset_others = 0UL;
- unsigned long val;
- int i;
-
- /*
- * now restore reset value on sampling overflowed counters
- */
- mask >>= PMU_FIRST_COUNTER;
- for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
-
- if ((mask & 0x1UL) == 0UL) continue;
-
- ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
- reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
-
- DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
- }
-
- /*
- * Now take care of resetting the other registers
- */
- for(i = 0; reset_others; i++, reset_others >>= 1) {
-
- if ((reset_others & 0x1) == 0) continue;
-
- ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
-
- DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
- is_long_reset ? "long" : "short", i, val));
- }
-}
-
-static void
-pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
-{
- unsigned long mask = ovfl_regs[0];
- unsigned long reset_others = 0UL;
- unsigned long val;
- int i;
-
- DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
-
- if (ctx->ctx_state == PFM_CTX_MASKED) {
- pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
- return;
- }
-
- /*
- * now restore reset value on sampling overflowed counters
- */
- mask >>= PMU_FIRST_COUNTER;
- for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
-
- if ((mask & 0x1UL) == 0UL) continue;
-
- val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
- reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
-
- DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
-
- pfm_write_soft_counter(ctx, i, val);
- }
-
- /*
- * Now take care of resetting the other registers
- */
- for(i = 0; reset_others; i++, reset_others >>= 1) {
-
- if ((reset_others & 0x1) == 0) continue;
-
- val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
-
- if (PMD_IS_COUNTING(i)) {
- pfm_write_soft_counter(ctx, i, val);
- } else {
- ia64_set_pmd(i, val);
- }
- DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
- is_long_reset ? "long" : "short", i, val));
- }
- ia64_srlz_d();
-}
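-
-/*
- * Illustrative worked example, not part of the original file: suppose
- * PMD4 overflowed (bit 4 set in ovfl_regs) and was programmed with
- * reset_pmds containing bit 5.  The first loop above reloads PMD4 with
- * its long or short reset value and ORs bit 5 into reset_others; the
- * second loop then reloads PMD5 as well, even though PMD5 itself never
- * overflowed.
- */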
-
-static int
-pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
-#ifndef XEN
- struct thread_struct *thread = NULL;
-#endif
- struct task_struct *task;
- pfarg_reg_t *req = (pfarg_reg_t *)arg;
- unsigned long value, pmc_pm;
- unsigned long smpl_pmds, reset_pmds, impl_pmds;
- unsigned int cnum, reg_flags, flags, pmc_type;
- int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
- int is_monitor, is_counting, state;
- int ret = -EINVAL;
- pfm_reg_check_t wr_func;
-#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
-
- state = ctx->ctx_state;
- is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
- is_system = ctx->ctx_fl_system;
- task = ctx->ctx_task;
- impl_pmds = pmu_conf->impl_pmds[0];
-#ifdef XEN
- task = NULL;
- BUG_ON(regs != NULL);
-#endif
-
- if (state == PFM_CTX_ZOMBIE) return -EINVAL;
-
-#ifndef XEN
- if (is_loaded) {
- thread = &task->thread;
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
- can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
- }
-#else
- /* XXX FIXME */
- if (state != PFM_CTX_UNLOADED) {
- gdprintk(XENLOG_DEBUG, "%s state %d\n", __func__, state);
- return -EBUSY;
- }
-#endif
-
- expert_mode = pfm_sysctl.expert_mode;
-
- for (i = 0; i < count; i++, req++) {
-
- cnum = req->reg_num;
- reg_flags = req->reg_flags;
- value = req->reg_value;
- smpl_pmds = req->reg_smpl_pmds[0];
- reset_pmds = req->reg_reset_pmds[0];
- flags = 0;
-
-
- if (cnum >= PMU_MAX_PMCS) {
- DPRINT(("pmc%u is invalid\n", cnum));
- goto error;
- }
-
- pmc_type = pmu_conf->pmc_desc[cnum].type;
- pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
- is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
- is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
-
- /*
-		 * we reject all non-implemented PMCs as well
-		 * as attempts to modify PMC[0-3], which are used
-		 * as status registers by the PMU
- */
- if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
- DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
- goto error;
- }
- wr_func = pmu_conf->pmc_desc[cnum].write_check;
- /*
- * If the PMC is a monitor, then if the value is not the default:
- * - system-wide session: PMCx.pm=1 (privileged monitor)
- * - per-task : PMCx.pm=0 (user monitor)
- */
- if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
- DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
- cnum,
- pmc_pm,
- is_system));
- goto error;
- }
-
- if (is_counting) {
- /*
- * enforce generation of overflow interrupt. Necessary on all
- * CPUs.
- */
- value |= 1 << PMU_PMC_OI;
-
- if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
- flags |= PFM_REGFL_OVFL_NOTIFY;
- }
-
- if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
-
- /* verify validity of smpl_pmds */
- if ((smpl_pmds & impl_pmds) != smpl_pmds) {
- DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
- goto error;
- }
-
- /* verify validity of reset_pmds */
- if ((reset_pmds & impl_pmds) != reset_pmds) {
- DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
- goto error;
- }
- } else {
- if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
- DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
- goto error;
- }
-			/* the eventid on non-counting monitors is ignored */
- }
-
- /*
- * execute write checker, if any
- */
- if (likely(expert_mode == 0 && wr_func)) {
- ret = (*wr_func)(task, ctx, cnum, &value, regs);
- if (ret) goto error;
- ret = -EINVAL;
- }
-
- /*
- * no error on this register
- */
- PFM_REG_RETFLAG_SET(req->reg_flags, 0);
-
- /*
- * Now we commit the changes to the software state
- */
-
- /*
- * update overflow information
- */
- if (is_counting) {
- /*
- * full flag update each time a register is programmed
- */
- ctx->ctx_pmds[cnum].flags = flags;
-
- ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
- ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
- ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
-
- /*
- * Mark all PMDS to be accessed as used.
- *
- * We do not keep track of PMC because we have to
- * systematically restore ALL of them.
- *
-			 * We do not update the used_monitors mask, because
-			 * if we have not programmed them, they will be in
-			 * a quiescent state, therefore we will not need to
-			 * mask/restore them when the context is MASKED.
- */
- CTX_USED_PMD(ctx, reset_pmds);
- CTX_USED_PMD(ctx, smpl_pmds);
- /*
- * make sure we do not try to reset on
- * restart because we have established new values
- */
- if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
- }
- /*
- * Needed in case the user does not initialize the equivalent
- * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
- * possible leak here.
- */
- CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
-
- /*
- * keep track of the monitor PMC that we are using.
- * we save the value of the pmc in ctx_pmcs[] and if
- * the monitoring is not stopped for the context we also
- * place it in the saved state area so that it will be
- * picked up later by the context switch code.
- *
- * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
- *
- * The value in thread->pmcs[] may be modified on overflow, i.e., when
- * monitoring needs to be stopped.
- */
- if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
-
- /*
- * update context state
- */
- ctx->ctx_pmcs[cnum] = value;
-
-#ifndef XEN
- if (is_loaded) {
- /*
- * write thread state
- */
- if (is_system == 0) thread->pmcs[cnum] = value;
-
- /*
- * write hardware register if we can
- */
- if (can_access_pmu) {
- ia64_set_pmc(cnum, value);
- }
-#ifdef CONFIG_SMP
- else {
- /*
- * per-task SMP only here
- *
- * we are guaranteed that the task is not running on the other CPU,
- * we indicate that this PMD will need to be reloaded if the task
- * is rescheduled on the CPU it ran last on.
- */
- ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
- }
-#endif
- }
-#endif
-
- DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
- cnum,
- value,
- is_loaded,
- can_access_pmu,
- flags,
- ctx->ctx_all_pmcs[0],
- ctx->ctx_used_pmds[0],
- ctx->ctx_pmds[cnum].eventid,
- smpl_pmds,
- reset_pmds,
- ctx->ctx_reload_pmcs[0],
- ctx->ctx_used_monitors[0],
- ctx->ctx_ovfl_regs[0]));
- }
-
- /*
- * make sure the changes are visible
- */
- if (can_access_pmu) ia64_srlz_d();
-
- return 0;
-error:
- PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
- return ret;
-}
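-
-/*
- * Illustrative sketch, not part of the original file: the
- * "is_system ^ pmc_pm" test above.  A monitor PMC carrying a
- * non-default value must have pm=1 in a system-wide session
- * (privileged monitor) and pm=0 in a per-task session (user monitor);
- * the XOR is non-zero exactly when the bit disagrees with the
- * session type.
- */
-static inline int
-example_pm_bit_ok(int is_system, unsigned long pmc_pm)
-{
-	/* 1 when the pm bit matches the session type */
-	return (is_system ^ pmc_pm) == 0;
-}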
-
-static int
-pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
-#ifndef XEN
- struct thread_struct *thread = NULL;
-#endif
- struct task_struct *task;
- pfarg_reg_t *req = (pfarg_reg_t *)arg;
- unsigned long value, hw_value, ovfl_mask;
- unsigned int cnum;
- int i, can_access_pmu = 0, state;
- int is_counting, is_loaded, is_system, expert_mode;
- int ret = -EINVAL;
- pfm_reg_check_t wr_func;
-
-
- state = ctx->ctx_state;
- is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
- is_system = ctx->ctx_fl_system;
- ovfl_mask = pmu_conf->ovfl_val;
- task = ctx->ctx_task;
-#ifdef XEN
- task = NULL;
- BUG_ON(regs != NULL);
-#endif
-
- if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
-
-#ifndef XEN
- /*
-	 * on both UP and SMP, we can only write to the PMD when the task is
- * the owner of the local PMU.
- */
- if (likely(is_loaded)) {
- thread = &task->thread;
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
- can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
- }
-#else
- /* XXX FIXME */
- if (state != PFM_CTX_UNLOADED) {
- gdprintk(XENLOG_DEBUG, "%s state %d\n", __func__, state);
- return -EBUSY;
- }
-#endif
- expert_mode = pfm_sysctl.expert_mode;
-
- for (i = 0; i < count; i++, req++) {
-
- cnum = req->reg_num;
- value = req->reg_value;
-
- if (!PMD_IS_IMPL(cnum)) {
- DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
- goto abort_mission;
- }
- is_counting = PMD_IS_COUNTING(cnum);
- wr_func = pmu_conf->pmd_desc[cnum].write_check;
-
- /*
- * execute write checker, if any
- */
- if (unlikely(expert_mode == 0 && wr_func)) {
- unsigned long v = value;
-
- ret = (*wr_func)(task, ctx, cnum, &v, regs);
- if (ret) goto abort_mission;
-
- value = v;
- ret = -EINVAL;
- }
-
- /*
- * no error on this register
- */
- PFM_REG_RETFLAG_SET(req->reg_flags, 0);
-
- /*
- * now commit changes to software state
- */
- hw_value = value;
-
- /*
- * update virtualized (64bits) counter
- */
- if (is_counting) {
- /*
- * write context state
- */
- ctx->ctx_pmds[cnum].lval = value;
-
- /*
-			 * when the context is loaded we use the split value
- */
- if (is_loaded) {
- hw_value = value & ovfl_mask;
- value = value & ~ovfl_mask;
- }
- }
- /*
- * update reset values (not just for counters)
- */
- ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
- ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
-
- /*
- * update randomization parameters (not just for counters)
- */
- ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
- ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
-
- /*
- * update context value
- */
- ctx->ctx_pmds[cnum].val = value;
-
- /*
- * Keep track of what we use
- *
- * We do not keep track of PMC because we have to
- * systematically restore ALL of them.
- */
- CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
-
- /*
- * mark this PMD register used as well
- */
- CTX_USED_PMD(ctx, RDEP(cnum));
-
- /*
- * make sure we do not try to reset on
- * restart because we have established new values
- */
- if (is_counting && state == PFM_CTX_MASKED) {
- ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
- }
-
- /* XXX FIXME */
-#ifndef XEN
- if (is_loaded) {
- /*
- * write thread state
- */
- if (is_system == 0) thread->pmds[cnum] = hw_value;
-
- /*
- * write hardware register if we can
- */
- if (can_access_pmu) {
- ia64_set_pmd(cnum, hw_value);
- } else {
-#ifdef CONFIG_SMP
- /*
- * we are guaranteed that the task is not running on the other CPU,
- * we indicate that this PMD will need to be reloaded if the task
- * is rescheduled on the CPU it ran last on.
- */
- ctx->ctx_reload_pmds[0] |= 1UL << cnum;
-#endif
- }
- }
-#endif
-
- DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
- "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
- cnum,
- value,
- is_loaded,
- can_access_pmu,
- hw_value,
- ctx->ctx_pmds[cnum].val,
- ctx->ctx_pmds[cnum].short_reset,
- ctx->ctx_pmds[cnum].long_reset,
- PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
- ctx->ctx_pmds[cnum].seed,
- ctx->ctx_pmds[cnum].mask,
- ctx->ctx_used_pmds[0],
- ctx->ctx_pmds[cnum].reset_pmds[0],
- ctx->ctx_reload_pmds[0],
- ctx->ctx_all_pmds[0],
- ctx->ctx_ovfl_regs[0]));
- }
-
- /*
- * make changes visible
- */
- if (can_access_pmu) ia64_srlz_d();
-
- return 0;
-
-abort_mission:
- /*
- * for now, we have only one possibility for error
- */
- PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
- return ret;
-}
-
-#ifndef XEN
-/*
- * By way of PROTECT_CTX(), interrupts are masked while we are in this function.
- * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
- * interrupt is delivered during the call, it will be kept pending until we leave, making
- * it appear as if it had been generated at the UNPROTECT_CTX(). At least we are
- * guaranteed to return consistent data to the user, it may simply be old. It is not
- * trivial to treat the overflow while inside the call because you may end up in
- * some module sampling buffer code causing deadlocks.
- */
-static int
-pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct thread_struct *thread = NULL;
- struct task_struct *task;
- unsigned long val = 0UL, lval, ovfl_mask, sval;
- pfarg_reg_t *req = (pfarg_reg_t *)arg;
- unsigned int cnum, reg_flags = 0;
- int i, can_access_pmu = 0, state;
- int is_loaded, is_system, is_counting, expert_mode;
- int ret = -EINVAL;
- pfm_reg_check_t rd_func;
-
- /*
- * access is possible when loaded only for
- * self-monitoring tasks or in UP mode
- */
-
- state = ctx->ctx_state;
- is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
- is_system = ctx->ctx_fl_system;
- ovfl_mask = pmu_conf->ovfl_val;
- task = ctx->ctx_task;
-
- if (state == PFM_CTX_ZOMBIE) return -EINVAL;
-
- if (likely(is_loaded)) {
- thread = &task->thread;
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
- /*
-		 * when not self-monitoring, this can only be true in UP
- */
- can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
-
- if (can_access_pmu) ia64_srlz_d();
- }
- expert_mode = pfm_sysctl.expert_mode;
-
- DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
- is_loaded,
- can_access_pmu,
- state));
-
- /*
- * on both UP and SMP, we can only read the PMD from the hardware register when
- * the task is the owner of the local PMU.
- */
-
- for (i = 0; i < count; i++, req++) {
-
- cnum = req->reg_num;
- reg_flags = req->reg_flags;
-
- if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
- /*
- * we can only read the registers that we use. That includes
- * the ones we explicitly initialize AND the ones we want included
- * in the sampling buffer (smpl_regs).
- *
- * Having this restriction allows optimization in the ctxsw routine
- * without compromising security (leaks).
- */
- if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
-
- sval = ctx->ctx_pmds[cnum].val;
- lval = ctx->ctx_pmds[cnum].lval;
- is_counting = PMD_IS_COUNTING(cnum);
-
- /*
- * If the task is not the current one, then we check if the
- * PMU state is still in the local live registers due to lazy ctxsw.
- * If true, then we read directly from the registers.
- */
- if (can_access_pmu){
- val = ia64_get_pmd(cnum);
- } else {
- /*
- * context has been saved
- * if context is zombie, then task does not exist anymore.
- * In this case, we use the full value saved in the context (pfm_flush_regs()).
- */
- val = is_loaded ? thread->pmds[cnum] : 0UL;
- }
- rd_func = pmu_conf->pmd_desc[cnum].read_check;
-
- if (is_counting) {
- /*
- * XXX: need to check for overflow when loaded
- */
- val &= ovfl_mask;
- val += sval;
- }
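- /*
- * Illustrative example (editor's note, not in the original source):
- * on a PMU with 47 implemented counter bits,
- * ovfl_mask = (1UL << 47) - 1. After one hardware wrap, sval carries
- * the upper bits (e.g. 1UL << 47) while the live PMD holds only the
- * low 47 bits, so
- *	val = (hw_pmd & ovfl_mask) + sval
- * reconstructs the full 64-bit software count (hw_pmd here stands
- * for the raw value read above).
- */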
-
- /*
- * execute read checker, if any
- */
- if (unlikely(expert_mode == 0 && rd_func)) {
- unsigned long v = val;
- ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
- if (ret) goto error;
- val = v;
- ret = -EINVAL;
- }
-
- PFM_REG_RETFLAG_SET(reg_flags, 0);
-
- DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
-
- /*
- * update register return value; abort all if a problem occurs during copy.
- * We only modify the reg_flags field. Skipping the access check is fine
- * because access has been verified upfront in sys_perfmonctl().
- */
- req->reg_value = val;
- req->reg_flags = reg_flags;
- req->reg_last_reset_val = lval;
- }
-
- return 0;
-
-error:
- PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
- return ret;
-}
-
-int
-pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
-{
- pfm_context_t *ctx;
-
- if (req == NULL) return -EINVAL;
-
- ctx = GET_PMU_CTX();
-
- if (ctx == NULL) return -EINVAL;
-
- /*
- * for now limit to current task, which is enough when calling
- * from overflow handler
- */
- if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
-
- return pfm_write_pmcs(ctx, req, nreq, regs);
-}
-EXPORT_SYMBOL(pfm_mod_write_pmcs);
-
-int
-pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
-{
- pfm_context_t *ctx;
-
- if (req == NULL) return -EINVAL;
-
- ctx = GET_PMU_CTX();
-
- if (ctx == NULL) return -EINVAL;
-
- /*
- * for now limit to current task, which is enough when calling
- * from overflow handler
- */
- if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
-
- return pfm_read_pmds(ctx, req, nreq, regs);
-}
-EXPORT_SYMBOL(pfm_mod_read_pmds);
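-/*
- * Editor's sketch (not in the original source): a kernel module, e.g. a
- * sampling-format handler running in the overflow path, could use these
- * pfm_mod_* entry points roughly as follows, assuming a context is
- * already loaded for the current task:
- *
- *	pfarg_reg_t req = { .reg_num = 4 };	(read PMD4)
- *
- *	if (pfm_mod_read_pmds(current, &req, 1, regs) == 0)
- *		consume(req.reg_value);		(hypothetical helper)
- */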
-#endif
-
-/*
- * Only call this function when a process is trying to
- * write the debug registers (reading is always allowed).
- */
-int
-pfm_use_debug_registers(struct task_struct *task)
-{
- pfm_context_t *ctx = task->thread.pfm_context;
- unsigned long flags;
- int ret = 0;
-
- if (pmu_conf->use_rr_dbregs == 0) return 0;
-
- DPRINT(("called for [%d]\n", task->pid));
-
- /*
- * do it only once
- */
- if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
-
- /*
- * Even on SMP, we do not need to use an atomic here because
- * the only way in is via ptrace() and this is possible only when the
- * process is stopped. Even in the case where the ctxsw out is not totally
- * completed by the time we come here, there is no way the 'stopped' process
- * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
- * So this is always safe.
- */
- if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
-
- LOCK_PFS(flags);
-
- /*
- * We cannot allow setting breakpoints when system wide monitoring
- * sessions are using the debug registers.
- */
- if (pfm_sessions.pfs_sys_use_dbregs > 0)
- ret = -1;
- else
- pfm_sessions.pfs_ptrace_use_dbregs++;
-
- DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
- pfm_sessions.pfs_ptrace_use_dbregs,
- pfm_sessions.pfs_sys_use_dbregs,
- task->pid, ret));
-
- UNLOCK_PFS(flags);
-
- return ret;
-}
-
-/*
- * This function is called for every task that exits with
- * IA64_THREAD_DBG_VALID set. This indicates a task which was
- * able to use the debug registers for debugging purposes via
- * ptrace(). Therefore we know it was not using them for
- * performance monitoring, so we only decrement the number
- * of "ptraced" debug register users to keep the count up to date.
- */
-int
-pfm_release_debug_registers(struct task_struct *task)
-{
- unsigned long flags;
- int ret;
-
- if (pmu_conf->use_rr_dbregs == 0) return 0;
-
- LOCK_PFS(flags);
- if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
- printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
- ret = -1;
- } else {
- pfm_sessions.pfs_ptrace_use_dbregs--;
- ret = 0;
- }
- UNLOCK_PFS(flags);
-
- return ret;
-}
-
-#ifndef XEN
-static int
-pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct task_struct *task;
- pfm_buffer_fmt_t *fmt;
- pfm_ovfl_ctrl_t rst_ctrl;
- int state, is_system;
- int ret = 0;
-
- state = ctx->ctx_state;
- fmt = ctx->ctx_buf_fmt;
- is_system = ctx->ctx_fl_system;
- task = PFM_CTX_TASK(ctx);
-
- switch(state) {
- case PFM_CTX_MASKED:
- break;
- case PFM_CTX_LOADED:
- if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
- /* fall through */
- case PFM_CTX_UNLOADED:
- case PFM_CTX_ZOMBIE:
- DPRINT(("invalid state=%d\n", state));
- return -EBUSY;
- default:
- DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
- return -EINVAL;
- }
-
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
-
- /* sanity check */
- if (unlikely(task == NULL)) {
- printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
- return -EINVAL;
- }
-
- if (task == current || is_system) {
-
- fmt = ctx->ctx_buf_fmt;
-
- DPRINT(("restarting self %d ovfl=0x%lx\n",
- task->pid,
- ctx->ctx_ovfl_regs[0]));
-
- if (CTX_HAS_SMPL(ctx)) {
-
- prefetch(ctx->ctx_smpl_hdr);
-
- rst_ctrl.bits.mask_monitoring = 0;
- rst_ctrl.bits.reset_ovfl_pmds = 0;
-
- if (state == PFM_CTX_LOADED)
- ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
- else
- ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
- } else {
- rst_ctrl.bits.mask_monitoring = 0;
- rst_ctrl.bits.reset_ovfl_pmds = 1;
- }
-
- if (ret == 0) {
- if (rst_ctrl.bits.reset_ovfl_pmds)
- pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
-
- if (rst_ctrl.bits.mask_monitoring == 0) {
- DPRINT(("resuming monitoring for [%d]\n", task->pid));
-
- if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
- } else {
- DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));
-
- // cannot use pfm_stop_monitoring(task, regs);
- }
- }
- /*
- * clear overflowed PMD mask to remove any stale information
- */
- ctx->ctx_ovfl_regs[0] = 0UL;
-
- /*
- * back to LOADED state
- */
- ctx->ctx_state = PFM_CTX_LOADED;
-
- /*
- * XXX: not really useful for self monitoring
- */
- ctx->ctx_fl_can_restart = 0;
-
- return 0;
- }
-
- /*
- * restart another task
- */
-
- /*
- * When PFM_CTX_MASKED, we cannot issue a restart before the previous
- * one is seen by the task.
- */
- if (state == PFM_CTX_MASKED) {
- if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
- /*
- * will prevent subsequent restart before this one is
- * seen by other task
- */
- ctx->ctx_fl_can_restart = 0;
- }
-
- /*
- * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
- * the task is blocked or on its way to block. That's the normal
- * restart path. If the monitoring is not masked, then the task
- * can be actively monitoring and we cannot directly intervene.
- * Therefore we use the trap mechanism to catch the task and
- * force it to reset the buffer/reset PMDs.
- *
- * if non-blocking, then we ensure that the task will go into
- * pfm_handle_work() before returning to user mode.
- *
- * We cannot explicitly reset another task; it MUST always
- * be done by the task itself. This works for system wide because
- * the tool that is controlling the session is logically doing
- * "self-monitoring".
- */
- if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
- DPRINT(("unblocking [%d] \n", task->pid));
- complete(&ctx->ctx_restart_done);
- } else {
- DPRINT(("[%d] armed exit trap\n", task->pid));
-
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
-
- PFM_SET_WORK_PENDING(task, 1);
-
- pfm_set_task_notify(task);
-
- /*
- * XXX: send reschedule if task runs on another CPU
- */
- }
- return 0;
-}
-
-static int
-pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- unsigned int m = *(unsigned int *)arg;
-
- pfm_sysctl.debug = m == 0 ? 0 : 1;
-
- printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
-
- if (m == 0) {
- memset(pfm_stats, 0, sizeof(pfm_stats));
- for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
- }
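- /*
- * Editor's note (not in the original source): seeding
- * pfm_ovfl_intr_cycles_min with ~0UL after the memset makes the first
- * overflow interrupt measured in pfm_interrupt_handler() become the
- * new minimum.
- */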
- return 0;
-}
-#endif
-
-/*
- * arg can be NULL and count can be zero for this function
- */
-static int
-pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
-#ifndef XEN
- struct thread_struct *thread = NULL;
-#endif
- struct task_struct *task;
- pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
- unsigned long flags;
- dbreg_t dbreg;
- unsigned int rnum;
- int first_time;
- int ret = 0, state;
- int i, can_access_pmu = 0;
- int is_system, is_loaded;
-
- if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
-
- state = ctx->ctx_state;
- is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
- is_system = ctx->ctx_fl_system;
- task = ctx->ctx_task;
-#ifdef XEN
- task = NULL;
- BUG_ON(regs != NULL);
- /* currently dbrs, ibrs aren't supported */
- BUG();
-#endif
-
- if (state == PFM_CTX_ZOMBIE) return -EINVAL;
-
- /*
- * on both UP and SMP, we can only write to the PMC when the task is
- * the owner of the local PMU.
- */
- if (is_loaded) {
-#ifdef XEN
- /* XXX */
- return -EBUSY;
-#else
- thread = &task->thread;
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
- can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
-#endif
- }
-
- /*
- * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
- * ensuring that no real breakpoint can be installed via this call.
- *
- * IMPORTANT: regs can be NULL in this function
- */
-
- first_time = ctx->ctx_fl_using_dbreg == 0;
-
- /*
- * don't bother if we are loaded and task is being debugged
- */
-#ifndef XEN
- if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
- DPRINT(("debug registers already in use for [%d]\n", task->pid));
- return -EBUSY;
- }
-#else
- /* Currently no support for is_loaded, see -EBUSY above */
-#endif
-
- /*
- * check for debug registers in system wide mode
- *
- * Even though a check is done in pfm_context_load(),
- * we must repeat it here, in case the registers are
- * written after the context is loaded.
- */
- if (is_loaded) {
- LOCK_PFS(flags);
-
- if (first_time && is_system) {
- if (pfm_sessions.pfs_ptrace_use_dbregs)
- ret = -EBUSY;
- else
- pfm_sessions.pfs_sys_use_dbregs++;
- }
- UNLOCK_PFS(flags);
- }
-
- if (ret != 0) return ret;
-
- /*
- * mark ourselves as a user of the debug registers for
- * perfmon purposes.
- */
- ctx->ctx_fl_using_dbreg = 1;
-
- /*
- * clear hardware registers to make sure we don't
- * pick up stale state.
- *
- * for a system wide session, we do not use
- * thread.dbr, thread.ibr because this process
- * never leaves the current CPU and the state
- * is shared by all processes running on it
- */
- if (first_time && can_access_pmu) {
-#ifndef XEN
- DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
-#endif
- for (i=0; i < pmu_conf->num_ibrs; i++) {
- ia64_set_ibr(i, 0UL);
- ia64_dv_serialize_instruction();
- }
- ia64_srlz_i();
- for (i=0; i < pmu_conf->num_dbrs; i++) {
- ia64_set_dbr(i, 0UL);
- ia64_dv_serialize_data();
- }
- ia64_srlz_d();
- }
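- /*
- * Editor's note (not in the original source): ibr writes affect
- * instruction fetch and are therefore followed by an instruction
- * serialization (srlz.i), while dbr writes only need a data
- * serialization (srlz.d); the per-register ia64_dv_serialize_*()
- * calls order the individual register writes.
- */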
-
- /*
- * Now install the values into the registers
- */
- for (i = 0; i < count; i++, req++) {
-
- rnum = req->dbreg_num;
- dbreg.val = req->dbreg_value;
-
- ret = -EINVAL;
-
- if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
- DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
- rnum, dbreg.val, mode, i, count));
-
- goto abort_mission;
- }
-
- /*
- * make sure we do not install an enabled breakpoint
- */
- if (rnum & 0x1) {
- if (mode == PFM_CODE_RR)
- dbreg.ibr.ibr_x = 0;
- else
- dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
- }
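- /*
- * Editor's note (not in the original source): IA-64 debug registers
- * come in even/odd pairs: the even register holds the breakpoint
- * address, the odd one the mask and enable bits (ibr.x for execute,
- * dbr.r/dbr.w for read/write). With those bits cleared, the registers
- * can still describe an address range for PMU range restriction
- * without ever arming a real breakpoint.
- */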
-
- PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
-
- /*
- * Debug registers, just like PMC, can only be modified
- * by a kernel call. Moreover, perfmon() accesses to those
- * registers are centralized in this routine. The hardware
- * does not modify the value of these registers, therefore,
- * if we save them as they are written, we can avoid having
- * to save them on context switch out. This is made possible
- * by the fact that when perfmon uses debug registers, ptrace()
- * won't be able to modify them concurrently.
- */
- if (mode == PFM_CODE_RR) {
- CTX_USED_IBR(ctx, rnum);
-
- if (can_access_pmu) {
- ia64_set_ibr(rnum, dbreg.val);
- ia64_dv_serialize_instruction();
- }
-
- ctx->ctx_ibrs[rnum] = dbreg.val;
-
- DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
- rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
- } else {
- CTX_USED_DBR(ctx, rnum);
-
- if (can_access_pmu) {
- ia64_set_dbr(rnum, dbreg.val);
- ia64_dv_serialize_data();
- }
- ctx->ctx_dbrs[rnum] = dbreg.val;
-
- DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
- rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
- }
- }
-
- return 0;
-
-abort_mission:
- /*
- * in case it was our first attempt, we undo the global modifications
- */
- if (first_time) {
- LOCK_PFS(flags);
- if (ctx->ctx_fl_system) {
- pfm_sessions.pfs_sys_use_dbregs--;
- }
- UNLOCK_PFS(flags);
- ctx->ctx_fl_using_dbreg = 0;
- }
- /*
- * install error return flag
- */
- PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
-
- return ret;
-}
-
-static int
-pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
-}
-
-static int
-pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
-}
-
-int
-pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
-{
- pfm_context_t *ctx;
-
- if (req == NULL) return -EINVAL;
-
- ctx = GET_PMU_CTX();
-
- if (ctx == NULL) return -EINVAL;
-
- /*
- * for now limit to current task, which is enough when calling
- * from overflow handler
- */
- if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
-
- return pfm_write_ibrs(ctx, req, nreq, regs);
-}
-EXPORT_SYMBOL(pfm_mod_write_ibrs);
-
-int
-pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
-{
- pfm_context_t *ctx;
-
- if (req == NULL) return -EINVAL;
-
- ctx = GET_PMU_CTX();
-
- if (ctx == NULL) return -EINVAL;
-
- /*
- * for now limit to current task, which is enough when calling
- * from overflow handler
- */
- if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
-
- return pfm_write_dbrs(ctx, req, nreq, regs);
-}
-EXPORT_SYMBOL(pfm_mod_write_dbrs);
-
-
-static int
-pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- pfarg_features_t *req = (pfarg_features_t *)arg;
-
- req->ft_version = PFM_VERSION;
- return 0;
-}
-
-#ifndef XEN
-static int
-pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct pt_regs *tregs;
- struct task_struct *task = PFM_CTX_TASK(ctx);
- int state, is_system;
-
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-
- /*
- * context must be attached to issue the stop command (includes LOADED, MASKED, ZOMBIE)
- */
- if (state == PFM_CTX_UNLOADED) return -EINVAL;
-
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
- DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
- PFM_CTX_TASK(ctx)->pid,
- state,
- is_system));
- /*
- * in system mode, we need to update the PMU directly
- * and the user level state of the caller, which may not
- * necessarily be the creator of the context.
- */
- if (is_system) {
- /*
- * Update local PMU first
- *
- * disable dcr pp
- */
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
- ia64_srlz_i();
-
- /*
- * update local cpuinfo
- */
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
-
- /*
- * stop monitoring, does srlz.i
- */
- pfm_clear_psr_pp();
-
- /*
- * stop monitoring in the caller
- */
- ia64_psr(regs)->pp = 0;
-
- return 0;
- }
- /*
- * per-task mode
- */
-
- if (task == current) {
- /* stop monitoring at kernel level */
- pfm_clear_psr_up();
-
- /*
- * stop monitoring at the user level
- */
- ia64_psr(regs)->up = 0;
- } else {
- tregs = task_pt_regs(task);
-
- /*
- * stop monitoring at the user level
- */
- ia64_psr(tregs)->up = 0;
-
- /*
- * monitoring disabled in kernel at next reschedule
- */
- ctx->ctx_saved_psr_up = 0;
- DPRINT(("task=[%d]\n", task->pid));
- }
- return 0;
-}
-
-
-static int
-pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct pt_regs *tregs;
- int state, is_system;
-
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-
- if (state != PFM_CTX_LOADED) return -EINVAL;
-
- /*
- * In system wide and when the context is loaded, access can only happen
- * when the caller is running on the CPU being monitored by the session.
- * It does not have to be the owner (ctx_task) of the context per se.
- */
- if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
- return -EBUSY;
- }
-
- /*
- * in system mode, we need to update the PMU directly
- * and the user level state of the caller, which may not
- * necessarily be the creator of the context.
- */
- if (is_system) {
-
- /*
- * set user level psr.pp for the caller
- */
- ia64_psr(regs)->pp = 1;
-
- /*
- * now update the local PMU and cpuinfo
- */
- PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
-
- /*
- * start monitoring at kernel level
- */
- pfm_set_psr_pp();
-
- /* enable dcr pp */
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
- ia64_srlz_i();
-
- return 0;
- }
-
- /*
- * per-process mode
- */
-
- if (ctx->ctx_task == current) {
-
- /* start monitoring at kernel level */
- pfm_set_psr_up();
-
- /*
- * activate monitoring at user level
- */
- ia64_psr(regs)->up = 1;
-
- } else {
- tregs = task_pt_regs(ctx->ctx_task);
-
- /*
- * start monitoring at the kernel level the next
- * time the task is scheduled
- */
- ctx->ctx_saved_psr_up = IA64_PSR_UP;
-
- /*
- * activate monitoring at user level
- */
- ia64_psr(tregs)->up = 1;
- }
- return 0;
-}
-
-static int
-pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- pfarg_reg_t *req = (pfarg_reg_t *)arg;
- unsigned int cnum;
- int i;
- int ret = -EINVAL;
-
- for (i = 0; i < count; i++, req++) {
-
- cnum = req->reg_num;
-
- if (!PMC_IS_IMPL(cnum)) goto abort_mission;
-
- req->reg_value = PMC_DFL_VAL(cnum);
-
- PFM_REG_RETFLAG_SET(req->reg_flags, 0);
-
- DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
- }
- return 0;
-
-abort_mission:
- PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
- return ret;
-}
-
-static int
-pfm_check_task_exist(pfm_context_t *ctx)
-{
- struct task_struct *g, *t;
- int ret = -ESRCH;
-
- read_lock(&tasklist_lock);
-
- do_each_thread (g, t) {
- if (t->thread.pfm_context == ctx) {
- ret = 0;
- break;
- }
- } while_each_thread (g, t);
-
- read_unlock(&tasklist_lock);
-
- DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
-
- return ret;
-}
-#endif
-
-static int
-pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct task_struct *task;
-#ifndef XEN
- struct thread_struct *thread;
-#endif
- struct pfm_context_t *old;
- unsigned long flags;
-#ifndef CONFIG_SMP
- struct task_struct *owner_task = NULL;
-#endif
- pfarg_load_t *req = (pfarg_load_t *)arg;
- unsigned long *pmcs_source, *pmds_source;
- int the_cpu;
- int ret = 0;
- int state, is_system, set_dbregs = 0;
-
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-#ifdef XEN
- task = NULL;
- old = NULL;
- pmcs_source = pmds_source = NULL;
-#ifndef CONFIG_SMP
- owner_task = NULL;
-#endif
- flags = 0;
- BUG_ON(count != 0);
- BUG_ON(regs != NULL);
-#endif
- /*
- * can only load from unloaded or terminated state
- */
- if (state != PFM_CTX_UNLOADED) {
- DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
- req->load_pid,
- ctx->ctx_state));
- return -EBUSY;
- }
-
-#ifndef XEN
- DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
-
- if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
- DPRINT(("cannot use blocking mode on self\n"));
- return -EINVAL;
- }
-
- ret = pfm_get_task(ctx, req->load_pid, &task);
- if (ret) {
- DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
- return ret;
- }
-
- ret = -EINVAL;
-
- /*
- * system wide is self monitoring only
- */
- if (is_system && task != current) {
- DPRINT(("system wide is self monitoring only load_pid=%d\n",
- req->load_pid));
- goto error;
- }
-
- thread = &task->thread;
-#else
- BUG_ON(!spin_is_locked(&ctx->ctx_lock));
- if (!is_system) {
- ret = -EINVAL;
- goto error;
- }
-#endif
-
- ret = 0;
-#ifndef XEN
- /*
- * cannot load a context which is using range restrictions,
- * into a task that is being debugged.
- */
- if (ctx->ctx_fl_using_dbreg) {
- if (thread->flags & IA64_THREAD_DBG_VALID) {
- ret = -EBUSY;
- DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
- goto error;
- }
- LOCK_PFS(flags);
-
- if (is_system) {
- if (pfm_sessions.pfs_ptrace_use_dbregs) {
- DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
- ret = -EBUSY;
- } else {
- pfm_sessions.pfs_sys_use_dbregs++;
- DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
- set_dbregs = 1;
- }
- }
-
- UNLOCK_PFS(flags);
-
- if (ret) goto error;
- }
-#else
- BUG_ON(ctx->ctx_fl_using_dbreg);
-#endif
-
- /*
- * SMP system-wide monitoring implies self-monitoring.
- *
- * The programming model expects the task to
- * be pinned on a CPU throughout the session.
- * Here we take note of the current CPU at the
- * time the context is loaded. No call from
- * another CPU will be allowed.
- *
- * The pinning via sched_setaffinity()
- * must be done by the calling task prior
- * to this call.
- *
- * systemwide: keep track of CPU this session is supposed to run on
- */
- the_cpu = ctx->ctx_cpu = smp_processor_id();
-
- ret = -EBUSY;
- /*
- * now reserve the session
- */
- ret = pfm_reserve_session(current, is_system, the_cpu);
- if (ret) goto error;
-
- /*
- * task is necessarily stopped at this point.
- *
- * If the previous context was zombie, then it got removed in
- * pfm_save_regs(). Therefore we should not see it here.
- * If we see a context, then this is an active context
- *
- * XXX: needs to be atomic
- */
-#ifndef XEN
- DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
- thread->pfm_context, ctx));
-
- ret = -EBUSY;
- old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
- if (old != NULL) {
- DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
- goto error_unres;
- }
-
- pfm_reset_msgq(ctx);
-#endif
-
- ctx->ctx_state = PFM_CTX_LOADED;
-
- /*
- * link context to task
- */
- ctx->ctx_task = task;
-
- if (is_system) {
- /*
- * we load as stopped
- */
- PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
-
- if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
- } else {
-#ifndef XEN
- thread->flags |= IA64_THREAD_PM_VALID;
-#else
- BUG();
-#endif
- }
-
-#ifndef XEN
- /*
- * propagate into thread-state
- */
- pfm_copy_pmds(task, ctx);
- pfm_copy_pmcs(task, ctx);
-
- pmcs_source = thread->pmcs;
- pmds_source = thread->pmds;
-
- /*
- * always the case for system-wide
- */
- if (task == current) {
-
- if (is_system == 0) {
-
- /* allow user level control */
- ia64_psr(regs)->sp = 0;
- DPRINT(("clearing psr.sp for [%d]\n", task->pid));
-
- SET_LAST_CPU(ctx, smp_processor_id());
- INC_ACTIVATION();
- SET_ACTIVATION(ctx);
-#ifndef CONFIG_SMP
- /*
- * push the other task out, if any
- */
- owner_task = GET_PMU_OWNER();
- if (owner_task) pfm_lazy_save_regs(owner_task);
-#endif
- }
- /*
- * load all PMD from ctx to PMU (as opposed to thread state)
- * restore all PMC from ctx to PMU
- */
- pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
- pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
-
- ctx->ctx_reload_pmcs[0] = 0UL;
- ctx->ctx_reload_pmds[0] = 0UL;
-
- /*
- * guaranteed safe by earlier check against DBG_VALID
- */
- if (ctx->ctx_fl_using_dbreg) {
- pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
- pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
- }
- /*
- * set new ownership
- */
- SET_PMU_OWNER(task, ctx);
-
- DPRINT(("context loaded on PMU for [%d]\n", task->pid));
- } else {
- /*
- * when not current, task MUST be stopped, so this is safe
- */
- regs = task_pt_regs(task);
-
- /* force a full reload */
- ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
- SET_LAST_CPU(ctx, -1);
-
- /* initial saved psr (stopped) */
- ctx->ctx_saved_psr_up = 0UL;
- ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
- }
-#else
- BUG_ON(!is_system);
-
- /* load pmds, pmcs */
- xenpfm_restore_pmds(ctx);
- xenpfm_restore_pmcs(ctx);
-
- ctx->ctx_reload_pmcs[0] = 0UL;
- ctx->ctx_reload_pmds[0] = 0UL;
-
- BUG_ON(ctx->ctx_fl_using_dbreg);
-
- SET_PMU_OWNER(NULL, ctx);
-#endif
-
- ret = 0;
-
-#ifndef XEN
-error_unres:
- if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
-#endif
-error:
-#ifndef XEN
- /*
- * we must undo the dbregs setting (for system-wide)
- */
- if (ret && set_dbregs) {
- LOCK_PFS(flags);
- pfm_sessions.pfs_sys_use_dbregs--;
- UNLOCK_PFS(flags);
- }
- /*
- * release task, there is now a link with the context
- */
- if (is_system == 0 && task != current) {
- pfm_put_task(task);
-
- if (ret == 0) {
- ret = pfm_check_task_exist(ctx);
- if (ret) {
- ctx->ctx_state = PFM_CTX_UNLOADED;
- ctx->ctx_task = NULL;
- }
- }
- }
-#else
- BUG_ON(set_dbregs);
-#endif
- return ret;
-}
-
-/*
- * in this function, we do not need to increase the use count
- * for the task via get_task_struct(), because we hold the
- * context lock. If the task were to disappear while having
- * a context attached, it would go through pfm_exit_thread()
- * which also grabs the context lock and would therefore be blocked
- * until we are done here.
- */
-static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
-
-static int
-pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
- struct task_struct *task = PFM_CTX_TASK(ctx);
- struct pt_regs *tregs;
- int prev_state, is_system;
- int ret;
-
-#ifndef XEN
- DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
-#else
- task = NULL;
- tregs = NULL;
- BUG_ON(arg != NULL);
- BUG_ON(count != 0);
- BUG_ON(regs != NULL);
-#endif
-
- prev_state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
-
- /*
- * unload only when necessary
- */
- if (prev_state == PFM_CTX_UNLOADED) {
- DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
- return 0;
- }
-
- /*
- * clear psr and dcr bits
- */
-#ifndef XEN
- ret = pfm_stop(ctx, NULL, 0, regs);
- if (ret) return ret;
-#else
- /* caller does it by hand */
- ret = 0;
-#endif
-
- ctx->ctx_state = PFM_CTX_UNLOADED;
-
- /*
- * in system mode, we need to update the PMU directly
- * and the user level state of the caller, which may not
- * necessarily be the creator of the context.
- */
- if (is_system) {
-
- /*
- * Update cpuinfo
- *
- * local PMU is taken care of in pfm_stop()
- */
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
-
- /*
- * save PMDs in context
- * release ownership
- */
- pfm_flush_pmds(current, ctx);
-
- /*
- * at this point we are done with the PMU
- * so we can unreserve the resource.
- */
- if (prev_state != PFM_CTX_ZOMBIE)
- pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
-
-#ifndef XEN
- /*
- * disconnect context from task
- */
- task->thread.pfm_context = NULL;
-#endif
- /*
- * disconnect task from context
- */
- ctx->ctx_task = NULL;
-
- /*
- * There is nothing more to clean up here.
- */
- return 0;
- }
-
-#ifndef XEN
- /*
- * per-task mode
- */
- tregs = task == current ? regs : task_pt_regs(task);
-
- if (task == current) {
- /*
- * cancel user level control
- */
- ia64_psr(regs)->sp = 1;
-
- DPRINT(("setting psr.sp for [%d]\n", task->pid));
- }
- /*
- * save PMDs to context
- * release ownership
- */
- pfm_flush_pmds(task, ctx);
-
- /*
- * at this point we are done with the PMU
- * so we can unreserve the resource.
- *
- * when state was ZOMBIE, we have already unreserved.
- */
- if (prev_state != PFM_CTX_ZOMBIE)
- pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
-
- /*
- * reset activation counter and psr
- */
- ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
- SET_LAST_CPU(ctx, -1);
-
- /*
- * PMU state will not be restored
- */
- task->thread.flags &= ~IA64_THREAD_PM_VALID;
-
- /*
- * break links between context and task
- */
- task->thread.pfm_context = NULL;
- ctx->ctx_task = NULL;
-
- PFM_SET_WORK_PENDING(task, 0);
-
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
- ctx->ctx_fl_can_restart = 0;
- ctx->ctx_fl_going_zombie = 0;
-
- DPRINT(("disconnected [%d] from context\n", task->pid));
-
- return 0;
-#else
- BUG();
- return -EINVAL;
-#endif
-}
-
-
-#ifndef XEN
-/*
- * called only from exit_thread(): task == current
- * we come here only if current has a context attached (loaded or masked)
- */
-void
-pfm_exit_thread(struct task_struct *task)
-{
- pfm_context_t *ctx;
- unsigned long flags;
- struct pt_regs *regs = task_pt_regs(task);
- int ret, state;
- int free_ok = 0;
-
- ctx = PFM_GET_CTX(task);
-
- PROTECT_CTX(ctx, flags);
-
- DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
-
- state = ctx->ctx_state;
- switch(state) {
- case PFM_CTX_UNLOADED:
- /*
- * we only come to this function if pfm_context is not NULL, i.e., we cannot
- * be in the unloaded state
- */
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
- break;
- case PFM_CTX_LOADED:
- case PFM_CTX_MASKED:
- ret = pfm_context_unload(ctx, NULL, 0, regs);
- if (ret) {
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
- }
- DPRINT(("ctx unloaded for current state was %d\n", state));
-
- pfm_end_notify_user(ctx);
- break;
- case PFM_CTX_ZOMBIE:
- ret = pfm_context_unload(ctx, NULL, 0, regs);
- if (ret) {
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
- }
- free_ok = 1;
- break;
- default:
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
- break;
- }
- UNPROTECT_CTX(ctx, flags);
-
- { u64 psr = pfm_get_psr();
- BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
- BUG_ON(GET_PMU_OWNER());
- BUG_ON(ia64_psr(regs)->up);
- BUG_ON(ia64_psr(regs)->pp);
- }
-
- /*
- * All memory free operations (especially for vmalloc'ed memory)
- * MUST be done with interrupts ENABLED.
- */
- if (free_ok) pfm_context_free(ctx);
-}
-
-/*
- * functions MUST be listed in increasing order of their index (see perfmon.h)
- */
-#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
-#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
-#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
-#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
-#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
-
-static pfm_cmd_desc_t pfm_cmd_tab[]={
-/* 0 */PFM_CMD_NONE,
-/* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
-/* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
-/* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
-/* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
-/* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
-/* 6 */PFM_CMD_NONE,
-/* 7 */PFM_CMD_NONE,
-/* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
-/* 9 */PFM_CMD_NONE,
-/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
-/* 11 */PFM_CMD_NONE,
-/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
-/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
-/* 14 */PFM_CMD_NONE,
-/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
-/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
-/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
-/* 18 */PFM_CMD_NONE,
-/* 19 */PFM_CMD_NONE,
-/* 20 */PFM_CMD_NONE,
-/* 21 */PFM_CMD_NONE,
-/* 22 */PFM_CMD_NONE,
-/* 23 */PFM_CMD_NONE,
-/* 24 */PFM_CMD_NONE,
-/* 25 */PFM_CMD_NONE,
-/* 26 */PFM_CMD_NONE,
-/* 27 */PFM_CMD_NONE,
-/* 28 */PFM_CMD_NONE,
-/* 29 */PFM_CMD_NONE,
-/* 30 */PFM_CMD_NONE,
-/* 31 */PFM_CMD_NONE,
-/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
-/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
-};
-#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
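-/*
- * Editor's note (not in the original source): sys_perfmonctl() below
- * dispatches through this table: pfm_cmd_tab[cmd] supplies the handler,
- * the expected argument count and size, and the flags (e.g. PFM_CMD_FD,
- * meaning the command operates on a perfmon context file descriptor).
- */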
-
-static int
-pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
-{
- struct task_struct *task;
- int state, old_state;
-
-recheck:
- state = ctx->ctx_state;
- task = ctx->ctx_task;
-
- if (task == NULL) {
- DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
- return 0;
- }
-
- DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
- ctx->ctx_fd,
- state,
- task->pid,
- task->state, PFM_CMD_STOPPED(cmd)));
-
- /*
- * self-monitoring always ok.
- *
- * for system-wide the caller can either be the creator of the
- * context (the one to which the context is attached) OR
- * a task running on the same CPU as the session.
- */
- if (task == current || ctx->ctx_fl_system) return 0;
-
- /*
- * we are monitoring another thread
- */
- switch(state) {
- case PFM_CTX_UNLOADED:
- /*
- * if context is UNLOADED we are safe to go
- */
- return 0;
- case PFM_CTX_ZOMBIE:
- /*
- * no command can operate on a zombie context
- */
- DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
- return -EINVAL;
- case PFM_CTX_MASKED:
- /*
- * PMU state has been saved to software even though
- * the thread may still be running.
- */
- if (cmd != PFM_UNLOAD_CONTEXT) return 0;
- }
-
- /*
- * context is LOADED or MASKED. Some commands may need to have
- * the task stopped.
- *
- * We could lift this restriction for UP but it would mean that
- * the user has no guarantee the task would not run between
- * two successive calls to perfmonctl(). That's probably OK.
- * If the user wants to ensure the task does not run, then
- * the task must be stopped.
- */
- if (PFM_CMD_STOPPED(cmd)) {
- if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
- DPRINT(("[%d] task not in stopped state\n", task->pid));
- return -EBUSY;
- }
- /*
- * task is now stopped, wait for ctxsw out
- *
- * This is an interesting point in the code.
- * We need to unprotect the context because
- * the pfm_save_regs() routine needs to grab
- * the same lock. There is danger in doing
- * this because it leaves a window open for
- * another task to get access to the context
- * and possibly change its state. The one thing
- * that is not possible is for the context to disappear
- * because we are protected by the VFS layer, i.e.,
- * get_fd()/put_fd().
- */
- old_state = state;
-
- UNPROTECT_CTX(ctx, flags);
-
- wait_task_inactive(task);
-
- PROTECT_CTX(ctx, flags);
-
- /*
- * we must recheck to verify if state has changed
- */
- if (ctx->ctx_state != old_state) {
- DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
- goto recheck;
- }
- }
- return 0;
-}
-
-/*
- * system-call entry point (must return long)
- */
-asmlinkage long
-sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
-{
- struct file *file = NULL;
- pfm_context_t *ctx = NULL;
- unsigned long flags = 0UL;
- void *args_k = NULL;
- long ret; /* will expand int return types */
- size_t base_sz, sz, xtra_sz = 0;
- int narg, completed_args = 0, call_made = 0, cmd_flags;
- int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
- int (*getsize)(void *arg, size_t *sz);
-#define PFM_MAX_ARGSIZE 4096
-
- /*
- * reject any call if perfmon was disabled at initialization
- */
- if (unlikely(pmu_conf == NULL)) return -ENOSYS;
-
- if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
- DPRINT(("invalid cmd=%d\n", cmd));
- return -EINVAL;
- }
-
- func = pfm_cmd_tab[cmd].cmd_func;
- narg = pfm_cmd_tab[cmd].cmd_narg;
- base_sz = pfm_cmd_tab[cmd].cmd_argsize;
- getsize = pfm_cmd_tab[cmd].cmd_getsize;
- cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
-
- if (unlikely(func == NULL)) {
- DPRINT(("invalid cmd=%d\n", cmd));
- return -EINVAL;
- }
-
- DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
- PFM_CMD_NAME(cmd),
- cmd,
- narg,
- base_sz,
- count));
-
- /*
- * check if number of arguments matches what the command expects
- */
- if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
- return -EINVAL;
-
-restart_args:
- sz = xtra_sz + base_sz*count;
- /*
- * limit abuse to min page size
- */
- if (unlikely(sz > PFM_MAX_ARGSIZE)) {
- printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
- return -E2BIG;
- }
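- /*
- * Editor's note (not in the original source): for example, a
- * PFM_WRITE_PMCS call with count = n copies n * sizeof(pfarg_reg_t)
- * bytes (plus any format-specific xtra_sz), so a request must fit in
- * the single PFM_MAX_ARGSIZE (4096-byte) buffer allocated below.
- */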
-
- /*
- * allocate default-sized argument buffer
- */
- if (likely(count && args_k == NULL)) {
- args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
- if (args_k == NULL) return -ENOMEM;
- }
-
- ret = -EFAULT;
-
- /*
- * copy arguments
- *
- * assume sz = 0 for commands without parameters
- */
- if (sz && copy_from_user(args_k, arg, sz)) {
- DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
- goto error_args;
- }
-
- /*
- * check if command supports extra parameters
- */
- if (completed_args == 0 && getsize) {
- /*
- * get extra parameters size (based on main argument)
- */
- ret = (*getsize)(args_k, &xtra_sz);
- if (ret) goto error_args;
-
- completed_args = 1;
-
- DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
-
- /* retry if necessary */
- if (likely(xtra_sz)) goto restart_args;
- }
-
- if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
-
- ret = -EBADF;
-
- file = fget(fd);
- if (unlikely(file == NULL)) {
- DPRINT(("invalid fd %d\n", fd));
- goto error_args;
- }
- if (unlikely(PFM_IS_FILE(file) == 0)) {
- DPRINT(("fd %d not related to perfmon\n", fd));
- goto error_args;
- }
-
- ctx = (pfm_context_t *)file->private_data;
- if (unlikely(ctx == NULL)) {
- DPRINT(("no context for fd %d\n", fd));
- goto error_args;
- }
- prefetch(&ctx->ctx_state);
-
- PROTECT_CTX(ctx, flags);
-
- /*
- * check task is stopped
- */
- ret = pfm_check_task_state(ctx, cmd, flags);
- if (unlikely(ret)) goto abort_locked;
-
-skip_fd:
- ret = (*func)(ctx, args_k, count, task_pt_regs(current));
-
- call_made = 1;
-
-abort_locked:
- if (likely(ctx)) {
- DPRINT(("context unlocked\n"));
- UNPROTECT_CTX(ctx, flags);
- }
-
- /* copy argument back to user, if needed */
- if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
-
-error_args:
- if (file)
- fput(file);
-
- kfree(args_k);
-
- DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
-
- return ret;
-}
-
-static void
-pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
-{
- pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
- pfm_ovfl_ctrl_t rst_ctrl;
- int state;
- int ret = 0;
-
- state = ctx->ctx_state;
- /*
- * Unlock sampling buffer and reset index atomically
- * XXX: not really needed when blocking
- */
- if (CTX_HAS_SMPL(ctx)) {
-
- rst_ctrl.bits.mask_monitoring = 0;
- rst_ctrl.bits.reset_ovfl_pmds = 0;
-
- if (state == PFM_CTX_LOADED)
- ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
- else
- ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
- } else {
- rst_ctrl.bits.mask_monitoring = 0;
- rst_ctrl.bits.reset_ovfl_pmds = 1;
- }
-
- if (ret == 0) {
- if (rst_ctrl.bits.reset_ovfl_pmds) {
- pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
- }
- if (rst_ctrl.bits.mask_monitoring == 0) {
- DPRINT(("resuming monitoring\n"));
- if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
- } else {
- DPRINT(("stopping monitoring\n"));
- //pfm_stop_monitoring(current, regs);
- }
- ctx->ctx_state = PFM_CTX_LOADED;
- }
-}
-
-/*
- * context MUST BE LOCKED when calling
- * can only be called for current
- */
-static void
-pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
-{
- int ret;
-
- DPRINT(("entering for [%d]\n", current->pid));
-
- ret = pfm_context_unload(ctx, NULL, 0, regs);
- if (ret) {
- printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", current->pid, ret);
- }
-
- /*
- * and wake up the controlling task, indicating we are now disconnected
- */
- wake_up_interruptible(&ctx->ctx_zombieq);
-
- /*
- * given that context is still locked, the controlling
- * task will only get access when we return from
- * pfm_handle_work().
- */
-}
-
-static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
-/*
- * pfm_handle_work() can be called with interrupts enabled
- * (TIF_NEED_RESCHED) or disabled. The down_interruptible
- * call may sleep, therefore we must re-enable interrupts
- * to avoid deadlocks. It is safe to do so because this function
- * is called ONLY when returning to user level (PUStk=1), in which case
- * there is no risk of kernel stack overflow due to deep
- * interrupt nesting.
- */
-void
-pfm_handle_work(void)
-{
- pfm_context_t *ctx;
- struct pt_regs *regs;
- unsigned long flags, dummy_flags;
- unsigned long ovfl_regs;
- unsigned int reason;
- int ret;
-
- ctx = PFM_GET_CTX(current);
- if (ctx == NULL) {
- printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
- return;
- }
-
- PROTECT_CTX(ctx, flags);
-
- PFM_SET_WORK_PENDING(current, 0);
-
- pfm_clear_task_notify();
-
- regs = task_pt_regs(current);
-
- /*
- * extract reason for being here and clear
- */
- reason = ctx->ctx_fl_trap_reason;
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
- ovfl_regs = ctx->ctx_ovfl_regs[0];
-
- DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
-
- /*
- * must be done before we check for simple-reset mode
- */
- if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
-
-
- //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
- if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
-
- /*
- * restore interrupt mask to what it was on entry.
- * Could be enabled/disabled.
- */
- UNPROTECT_CTX(ctx, flags);
-
- /*
- * force interrupt enable because of down_interruptible()
- */
- local_irq_enable();
-
- DPRINT(("before block sleeping\n"));
-
- /*
- * may go through without blocking on SMP systems
- * if restart has been received already by the time we call down()
- */
- ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
-
- DPRINT(("after block sleeping ret=%d\n", ret));
-
- /*
- * lock context and mask interrupts again
- * We save flags into a dummy because we may have
- * altered the interrupt mask compared to entry into this
- * function.
- */
- PROTECT_CTX(ctx, dummy_flags);
-
- /*
- * we need to read the ovfl_regs only after wake-up
- * because we may have had pfm_write_pmds() in between
- * and that can change PMD values and therefore
- * ovfl_regs is reset for these new PMD values.
- */
- ovfl_regs = ctx->ctx_ovfl_regs[0];
-
- if (ctx->ctx_fl_going_zombie) {
-do_zombie:
- DPRINT(("context is zombie, bailing out\n"));
- pfm_context_force_terminate(ctx, regs);
- goto nothing_to_do;
- }
- /*
- * in case of interruption of down() we don't restart anything
- */
- if (ret < 0) goto nothing_to_do;
-
-skip_blocking:
- pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
- ctx->ctx_ovfl_regs[0] = 0UL;
-
-nothing_to_do:
- /*
- * restore flags as they were upon entry
- */
- UNPROTECT_CTX(ctx, flags);
-}
-
-static int
-pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
-{
- if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
- DPRINT(("ignoring overflow notification, owner is zombie\n"));
- return 0;
- }
-
- DPRINT(("waking up somebody\n"));
-
- if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
-
- /*
- * safe, we are not in intr handler, nor in ctxsw when
- * we come here
- */
- kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
-
- return 0;
-}
-
-static int
-pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
-{
- pfm_msg_t *msg = NULL;
-
- if (ctx->ctx_fl_no_msg == 0) {
- msg = pfm_get_new_msg(ctx);
- if (msg == NULL) {
- printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
- return -1;
- }
-
- msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
- msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
- msg->pfm_ovfl_msg.msg_active_set = 0;
- msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
- msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
- msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
- msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
- msg->pfm_ovfl_msg.msg_tstamp = 0UL;
- }
-
- DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
- msg,
- ctx->ctx_fl_no_msg,
- ctx->ctx_fd,
- ovfl_pmds));
-
- return pfm_notify_user(ctx, msg);
-}
-
-static int
-pfm_end_notify_user(pfm_context_t *ctx)
-{
- pfm_msg_t *msg;
-
- msg = pfm_get_new_msg(ctx);
- if (msg == NULL) {
- printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
- return -1;
- }
- /* no leak */
- memset(msg, 0, sizeof(*msg));
-
- msg->pfm_end_msg.msg_type = PFM_MSG_END;
- msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
- msg->pfm_ovfl_msg.msg_tstamp = 0UL;
-
- DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
- msg,
- ctx->ctx_fl_no_msg,
- ctx->ctx_fd));
-
- return pfm_notify_user(ctx, msg);
-}
-#else
-#define pfm_ovfl_notify_user(ctx, ovfl_pmds) do {} while(0)
-#endif
-
-/*
- * main overflow processing routine.
- * it can be called from the interrupt path or explicitly from the context switch code
- */
-static void
-pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
-{
- pfm_ovfl_arg_t *ovfl_arg;
- unsigned long mask;
- unsigned long old_val, ovfl_val, new_val;
- unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
- unsigned long tstamp;
- pfm_ovfl_ctrl_t ovfl_ctrl;
- unsigned int i, has_smpl;
- int must_notify = 0;
-#ifdef XEN
- BUG_ON(task != NULL);
-#endif
-
- if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
-
- /*
- * sanity test. Should never happen
- */
- if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
-
- tstamp = ia64_get_itc();
- mask = pmc0 >> PMU_FIRST_COUNTER;
- ovfl_val = pmu_conf->ovfl_val;
- has_smpl = CTX_HAS_SMPL(ctx);
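- /*
- * Editor's note (not in the original source): in PMC0, bit 0 is the
- * freeze bit (fr) tested above, and the overflow status bits of the
- * counting PMDs start at bit PMU_FIRST_COUNTER; the shift above
- * therefore leaves one overflow flag per counter in 'mask'.
- */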
-
- DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
- "used_pmds=0x%lx\n",
- pmc0,
- task ? task->pid: -1,
- (regs ? regs->cr_iip : 0),
- CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
- ctx->ctx_used_pmds[0]));
-
-
- /*
- * first we update the virtual counters
- * assume there was a prior ia64_srlz_d() issued
- */
- for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
-
- /* skip pmd which did not overflow */
- if ((mask & 0x1) == 0) continue;
-
- /*
- * Note that the pmd is not necessarily 0 at this point as qualified events
- * may have happened before the PMU was frozen. The residual count is not
- * taken into consideration here but will be with any read of the pmd via
- * pfm_read_pmds().
- */
- old_val = new_val = ctx->ctx_pmds[i].val;
- new_val += 1 + ovfl_val;
- ctx->ctx_pmds[i].val = new_val;
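- /*
- * Editor's note (not in the original source): adding 1 + ovfl_val
- * credits the software counter with one full hardware period (2^n
- * for n implemented bits); the old_val > new_val test below then
- * detects a wrap of the full 64-bit software value, i.e. a true
- * overflow of the virtualized counter.
- */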
-
- /*
- * check for overflow condition
- */
- if (likely(old_val > new_val)) {
- ovfl_pmds |= 1UL << i;
- if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
- }
-
- DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
- i,
- new_val,
- old_val,
- ia64_get_pmd(i) & ovfl_val,
- ovfl_pmds,
- ovfl_notify));
- }
-
- /*
- * there was no 64-bit overflow, nothing else to do
- */
- if (ovfl_pmds == 0UL) return;
-
- /*
- * reset all control bits
- */
- ovfl_ctrl.val = 0;
- reset_pmds = 0UL;
-
- /*
- * if a sampling format module exists, then we "cache" the overflow by
- * calling the module's handler() routine.
- */
- if (has_smpl) {
- unsigned long start_cycles, end_cycles;
- unsigned long pmd_mask;
- int j, k, ret = 0;
- int this_cpu = smp_processor_id();
-
- pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
- ovfl_arg = &ctx->ctx_ovfl_arg;
-
- prefetch(ctx->ctx_smpl_hdr);
-
- for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
-
- mask = 1UL << i;
-
- if ((pmd_mask & 0x1) == 0) continue;
-
- ovfl_arg->ovfl_pmd = (unsigned char )i;
- ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
- ovfl_arg->active_set = 0;
- ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
- ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
-
- ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
- ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
- ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
-
- /*
- * copy values of pmds of interest. Sampling format may copy them
- * into sampling buffer.
- */
- if (smpl_pmds) {
- for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
- if ((smpl_pmds & 0x1) == 0) continue;
- ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
- DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
- }
- }
-
- pfm_stats[this_cpu].pfm_smpl_handler_calls++;
-
- start_cycles = ia64_get_itc();
-
- /*
- * call custom buffer format record (handler) routine
- */
- ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
-
- end_cycles = ia64_get_itc();
-
- /*
- * For those controls, we take the union because they have
- * an all or nothing behavior.
- */
- ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
- ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
- ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
- /*
- * build the bitmask of pmds to reset now
- */
- if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
-
- pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
- }
- /*
- * when the module cannot handle the rest of the overflows, we abort right here
- */
- if (ret && pmd_mask) {
- DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
- pmd_mask<<PMU_FIRST_COUNTER));
- }
- /*
- * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
- */
- ovfl_pmds &= ~reset_pmds;
- } else {
- /*
- * when no sampling module is used, then the default
- * is to notify on overflow if requested by user
- */
- ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
- ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
- ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
- ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
- /*
- * if needed, we reset all overflowed pmds
- */
- if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
- }
-
- DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
-
- /*
- * reset the requested PMD registers using the short reset values
- */
- if (reset_pmds) {
- unsigned long bm = reset_pmds;
- pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
- }
-
- if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
-#ifndef XEN
- /*
- * keep track of what to reset when unblocking
- */
- ctx->ctx_ovfl_regs[0] = ovfl_pmds;
-
- /*
- * check for blocking context
- */
- if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
-
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
-
- /*
- * set the perfmon specific checking pending work for the task
- */
- PFM_SET_WORK_PENDING(task, 1);
-
- /*
- * when coming from ctxsw, current still points to the
- * previous task, therefore we must work with task and not current.
- */
- pfm_set_task_notify(task);
- }
- /*
- * defer until state is changed (shortens the spin window). The context is locked
- * anyway, so the signal receiver would just spin for nothing.
- */
- must_notify = 1;
-#else
- gdprintk(XENLOG_INFO, "%s check!\n", __func__);
-#endif
- }
-
- DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
-#ifndef XEN
- GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
- PFM_GET_WORK_PENDING(task),
-#else
- -1, 0UL,
-#endif
- ctx->ctx_fl_trap_reason,
- ovfl_pmds,
- ovfl_notify,
- ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
- /*
- * in case monitoring must be stopped, we toggle the psr bits
- */
- if (ovfl_ctrl.bits.mask_monitoring) {
-#ifndef XEN
- pfm_mask_monitoring(task);
- ctx->ctx_state = PFM_CTX_MASKED;
- ctx->ctx_fl_can_restart = 1;
-#else
- gdprintk(XENLOG_INFO, "%s check!\n", __func__);
-#endif
- }
-
- /*
- * send notification now
- */
- if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
-
- return;
-
-sanity_check:
- printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
- smp_processor_id(),
- task ? task->pid : -1,
- pmc0);
- return;
-
-stop_monitoring:
- /*
- * in SMP, a zombie context is never restored but reclaimed in pfm_load_regs().
- * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
- * come here as zombie only if the task is the current task, in which case we
- * can access the PMU hardware directly.
- *
- * Note that zombies do have PM_VALID set. So here we do the minimum.
- *
- * In case the context was zombified, it could not be reclaimed at the time
- * the monitoring program exited. At this point, the PMU reservation has been
- * returned and the sampling buffer has been freed. We must convert this call
- * into a spurious interrupt. However, we must also avoid infinite overflows
- * by stopping monitoring for this task. We can only come here for a per-task
- * context. All we need to do is stop monitoring using the psr bits, which
- * are always task private. By re-enabling secure monitoring, we ensure that
- * the monitored task will not be able to re-activate monitoring.
- * The task will eventually be context switched out, at which point the context
- * will be reclaimed (that includes releasing ownership of the PMU).
- *
- * So there might be a window of time where the number of per-task sessions is zero
- * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
- * context. This is safe because if a per-task session comes in, it will push this one
- * out, and by virtue of pfm_save_regs(), this one will disappear. If a system wide
- * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
- * also push our zombie context out.
- *
- * Overall pretty hairy stuff....
- */
- DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));
- pfm_clear_psr_up();
- ia64_psr(regs)->up = 0;
- ia64_psr(regs)->sp = 1;
- return;
-}
-
-static int
-pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
-{
- struct task_struct *task;
- pfm_context_t *ctx;
- unsigned long flags;
- u64 pmc0;
- int this_cpu = smp_processor_id();
- int retval = 0;
-
- pfm_stats[this_cpu].pfm_ovfl_intr_count++;
-
- /*
- * srlz.d done before arriving here
- */
- pmc0 = ia64_get_pmc(0);
-
-#ifndef XEN
- task = GET_PMU_OWNER();
-#else
- task = NULL;
-#endif
- ctx = GET_PMU_CTX();
-
- /*
- * if we have some pending bits set
- * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
- */
-#ifndef XEN
- if (PMC0_HAS_OVFL(pmc0) && task) {
-#else
- if (PMC0_HAS_OVFL(pmc0)) {
-#endif
- /*
- * we assume that pmc0.fr is always set here
- */
-
- /* sanity check */
- if (!ctx) goto report_spurious1;
-
-#ifndef XEN
- if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
- goto report_spurious2;
-#endif
-
- PROTECT_CTX_NOPRINT(ctx, flags);
-
- pfm_overflow_handler(task, ctx, pmc0, regs);
-
- UNPROTECT_CTX_NOPRINT(ctx, flags);
-
- } else {
- pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
- retval = -1;
- }
- /*
- * keep it unfrozen at all times
- */
- pfm_unfreeze_pmu();
-
- return retval;
-
-report_spurious1:
-#ifndef XEN
- printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
- this_cpu, task->pid);
-#endif
- pfm_unfreeze_pmu();
- return -1;
-#ifndef XEN /* XEN path doesn't take this goto */
-report_spurious2:
- printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
- this_cpu,
- task->pid);
- pfm_unfreeze_pmu();
- return -1;
-#endif
-}
-
-static irqreturn_t
-pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
-{
- unsigned long start_cycles, total_cycles;
- unsigned long min, max;
- int this_cpu;
- int ret;
-
- this_cpu = get_cpu();
- if (likely(!pfm_alt_intr_handler)) {
- min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
- max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
-
- start_cycles = ia64_get_itc();
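-		/* ar.itc (the interval time counter) ticks at a fixed rate;
-		 * the delta computed below is the handler cost in cycles */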
-
- ret = pfm_do_interrupt_handler(irq, arg, regs);
-
- total_cycles = ia64_get_itc();
-
- /*
- * don't measure spurious interrupts
- */
- if (likely(ret == 0)) {
- total_cycles -= start_cycles;
-
- if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
- if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
-
- pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
- }
- }
- else {
- (*pfm_alt_intr_handler->handler)(irq, arg, regs);
- }
-
- put_cpu_no_resched();
- return IRQ_HANDLED;
-}
-
-#ifndef XEN
-/*
- * /proc/perfmon interface, for debug only
- */
-
-#define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1)
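-/*
- * iterator cookies: PFM_PROC_SHOW_HEADER selects the banner, and
- * (void *)(cpu + 1) selects each online CPU, leaving NULL free as the
- * end-of-sequence marker (see pfm_proc_start()/pfm_proc_show()).
- */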
-
-static void *
-pfm_proc_start(struct seq_file *m, loff_t *pos)
-{
- if (*pos == 0) {
- return PFM_PROC_SHOW_HEADER;
- }
-
- while (*pos <= NR_CPUS) {
- if (cpu_online(*pos - 1)) {
- return (void *)*pos;
- }
- ++*pos;
- }
- return NULL;
-}
-
-static void *
-pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return pfm_proc_start(m, pos);
-}
-
-static void
-pfm_proc_stop(struct seq_file *m, void *v)
-{
-}
-
-static void
-pfm_proc_show_header(struct seq_file *m)
-{
- struct list_head * pos;
- pfm_buffer_fmt_t * entry;
- unsigned long flags;
-
- seq_printf(m,
- "perfmon version : %u.%u\n"
- "model : %s\n"
- "fastctxsw : %s\n"
- "expert mode : %s\n"
- "ovfl_mask : 0x%lx\n"
- "PMU flags : 0x%x\n",
- PFM_VERSION_MAJ, PFM_VERSION_MIN,
- pmu_conf->pmu_name,
- pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
- pfm_sysctl.expert_mode > 0 ? "Yes": "No",
- pmu_conf->ovfl_val,
- pmu_conf->flags);
-
- LOCK_PFS(flags);
-
- seq_printf(m,
- "proc_sessions : %u\n"
- "sys_sessions : %u\n"
- "sys_use_dbregs : %u\n"
- "ptrace_use_dbregs : %u\n",
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- pfm_sessions.pfs_ptrace_use_dbregs);
-
- UNLOCK_PFS(flags);
-
- spin_lock(&pfm_buffer_fmt_lock);
-
- list_for_each(pos, &pfm_buffer_fmt_list) {
- entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
- seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
- entry->fmt_uuid[0],
- entry->fmt_uuid[1],
- entry->fmt_uuid[2],
- entry->fmt_uuid[3],
- entry->fmt_uuid[4],
- entry->fmt_uuid[5],
- entry->fmt_uuid[6],
- entry->fmt_uuid[7],
- entry->fmt_uuid[8],
- entry->fmt_uuid[9],
- entry->fmt_uuid[10],
- entry->fmt_uuid[11],
- entry->fmt_uuid[12],
- entry->fmt_uuid[13],
- entry->fmt_uuid[14],
- entry->fmt_uuid[15],
- entry->fmt_name);
- }
- spin_unlock(&pfm_buffer_fmt_lock);
-
-}
-
-static int
-pfm_proc_show(struct seq_file *m, void *v)
-{
- unsigned long psr;
- unsigned int i;
- int cpu;
-
- if (v == PFM_PROC_SHOW_HEADER) {
- pfm_proc_show_header(m);
- return 0;
- }
-
- /* show info for CPU (v - 1) */
-
- cpu = (long)v - 1;
- seq_printf(m,
- "CPU%-2d overflow intrs : %lu\n"
- "CPU%-2d overflow cycles : %lu\n"
- "CPU%-2d overflow min : %lu\n"
- "CPU%-2d overflow max : %lu\n"
- "CPU%-2d smpl handler calls : %lu\n"
- "CPU%-2d smpl handler cycles : %lu\n"
- "CPU%-2d spurious intrs : %lu\n"
- "CPU%-2d replay intrs : %lu\n"
- "CPU%-2d syst_wide : %d\n"
- "CPU%-2d dcr_pp : %d\n"
- "CPU%-2d exclude idle : %d\n"
- "CPU%-2d owner : %d\n"
- "CPU%-2d context : %p\n"
- "CPU%-2d activations : %lu\n",
- cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
- cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
- cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
- cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
- cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
- cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
- cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
- cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
- cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
- cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
- cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
- cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
- cpu, pfm_get_cpu_data(pmu_ctx, cpu),
- cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
-
- if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
-
- psr = pfm_get_psr();
-
- ia64_srlz_d();
-
- seq_printf(m,
- "CPU%-2d psr : 0x%lx\n"
- "CPU%-2d pmc0 : 0x%lx\n",
- cpu, psr,
- cpu, ia64_get_pmc(0));
-
- for (i=0; PMC_IS_LAST(i) == 0; i++) {
- if (PMC_IS_COUNTING(i) == 0) continue;
- seq_printf(m,
- "CPU%-2d pmc%u : 0x%lx\n"
- "CPU%-2d pmd%u : 0x%lx\n",
- cpu, i, ia64_get_pmc(i),
- cpu, i, ia64_get_pmd(i));
- }
- }
- return 0;
-}
-
-struct seq_operations pfm_seq_ops = {
- .start = pfm_proc_start,
- .next = pfm_proc_next,
- .stop = pfm_proc_stop,
- .show = pfm_proc_show
-};
-
-static int
-pfm_proc_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &pfm_seq_ops);
-}
-#endif
-
-
-/*
- * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
- * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
- * is active or inactive based on mode. We must rely on the value in
- * local_cpu_data->pfm_syst_info
- */
-void
-pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
-{
- struct pt_regs *regs;
- unsigned long dcr;
- unsigned long dcr_pp;
-
- dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
-
- /*
- * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
- * on every CPU, so we can rely on the pid to identify the idle task.
- */
- if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
- regs = task_pt_regs(task);
- ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
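-		/* not the idle task (or idle not excluded): mirror dcr.pp into
-		 * the task's saved psr.pp on switch-in, clear it on switch-out */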
- return;
- }
- /*
- * if monitoring has started
- */
- if (dcr_pp) {
- dcr = ia64_getreg(_IA64_REG_CR_DCR);
- /*
- * context switching in?
- */
- if (is_ctxswin) {
- /* mask monitoring for the idle task */
- ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
- pfm_clear_psr_pp();
- ia64_srlz_i();
- return;
- }
- /*
- * context switching out
- * restore monitoring for next task
- *
-	 * Due to inlining, this odd if-then-else construction generates
- * better code.
- */
- ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
- pfm_set_psr_pp();
- ia64_srlz_i();
- }
-}
-
-#ifndef XEN
-#ifdef CONFIG_SMP
-
-static void
-pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
-{
- struct task_struct *task = ctx->ctx_task;
-
- ia64_psr(regs)->up = 0;
- ia64_psr(regs)->sp = 1;
-
- if (GET_PMU_OWNER() == task) {
- DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
- SET_PMU_OWNER(NULL, NULL);
- }
-
- /*
- * disconnect the task from the context and vice-versa
- */
- PFM_SET_WORK_PENDING(task, 0);
-
- task->thread.pfm_context = NULL;
- task->thread.flags &= ~IA64_THREAD_PM_VALID;
-
- DPRINT(("force cleanup for [%d]\n", task->pid));
-}
-
-
-/*
- * in 2.6, interrupts are masked when we come here and the runqueue lock is held
- */
-void
-pfm_save_regs(struct task_struct *task)
-{
- pfm_context_t *ctx;
- struct thread_struct *t;
- unsigned long flags;
- u64 psr;
-
-
- ctx = PFM_GET_CTX(task);
- if (ctx == NULL) return;
- t = &task->thread;
-
- /*
- * we always come here with interrupts ALREADY disabled by
- * the scheduler. So we simply need to protect against concurrent
- * access, not CPU concurrency.
- */
- flags = pfm_protect_ctx_ctxsw(ctx);
-
- if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
- struct pt_regs *regs = task_pt_regs(task);
-
- pfm_clear_psr_up();
-
- pfm_force_cleanup(ctx, regs);
-
- BUG_ON(ctx->ctx_smpl_hdr);
-
- pfm_unprotect_ctx_ctxsw(ctx, flags);
-
- pfm_context_free(ctx);
- return;
- }
-
- /*
- * save current PSR: needed because we modify it
- */
- ia64_srlz_d();
- psr = pfm_get_psr();
-
- BUG_ON(psr & (IA64_PSR_I));
-
- /*
- * stop monitoring:
- * This is the last instruction which may generate an overflow
- *
-	 * We do not need to set psr.sp because it is irrelevant in the kernel.
- * It will be restored from ipsr when going back to user level
- */
- pfm_clear_psr_up();
-
- /*
- * keep a copy of psr.up (for reload)
- */
- ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
-
- /*
- * release ownership of this PMU.
- * PM interrupts are masked, so nothing
- * can happen.
- */
- SET_PMU_OWNER(NULL, NULL);
-
- /*
-	 * we systematically save the PMDs as we have no
-	 * guarantee we will be scheduled on that same
-	 * CPU again.
- */
- pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);
-
- /*
- * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
- * we will need it on the restore path to check
- * for pending overflow.
- */
- t->pmcs[0] = ia64_get_pmc(0);
-
- /*
- * unfreeze PMU if had pending overflows
- */
- if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
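-	/* bit 0 of pmc0 is the freeze bit; any other bit set records an
-	 * overflow (cf. PMC0_HAS_OVFL), hence the ~0x1UL test above */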
-
- /*
- * finally, allow context access.
- * interrupts will still be masked after this call.
- */
- pfm_unprotect_ctx_ctxsw(ctx, flags);
-}
-
-#else /* !CONFIG_SMP */
-void
-pfm_save_regs(struct task_struct *task)
-{
- pfm_context_t *ctx;
- u64 psr;
-
- ctx = PFM_GET_CTX(task);
- if (ctx == NULL) return;
-
- /*
- * save current PSR: needed because we modify it
- */
- psr = pfm_get_psr();
-
- BUG_ON(psr & (IA64_PSR_I));
-
- /*
- * stop monitoring:
- * This is the last instruction which may generate an overflow
- *
-	 * We do not need to set psr.sp because it is irrelevant in the kernel.
- * It will be restored from ipsr when going back to user level
- */
- pfm_clear_psr_up();
-
- /*
- * keep a copy of psr.up (for reload)
- */
- ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
-}
-
-static void
-pfm_lazy_save_regs (struct task_struct *task)
-{
- pfm_context_t *ctx;
- struct thread_struct *t;
- unsigned long flags;
-
- { u64 psr = pfm_get_psr();
- BUG_ON(psr & IA64_PSR_UP);
- }
-
- ctx = PFM_GET_CTX(task);
- t = &task->thread;
-
- /*
- * we need to mask PMU overflow here to
- * make sure that we maintain pmc0 until
- * we save it. overflow interrupts are
- * treated as spurious if there is no
- * owner.
- *
- * XXX: I don't think this is necessary
- */
- PROTECT_CTX(ctx,flags);
-
- /*
- * release ownership of this PMU.
- * must be done before we save the registers.
- *
- * after this call any PMU interrupt is treated
- * as spurious.
- */
- SET_PMU_OWNER(NULL, NULL);
-
- /*
- * save all the pmds we use
- */
- pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);
-
- /*
- * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
-	 * it is needed to check for pending overflow
- * on the restore path
- */
- t->pmcs[0] = ia64_get_pmc(0);
-
- /*
- * unfreeze PMU if had pending overflows
- */
- if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
-
- /*
-	 * we can now unmask PMU interrupts; they will
- * be treated as purely spurious and we will not
- * lose any information
- */
- UNPROTECT_CTX(ctx,flags);
-}
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_SMP
-/*
- * in 2.6, interrupts are masked when we come here and the runqueue lock is held
- */
-void
-pfm_load_regs (struct task_struct *task)
-{
- pfm_context_t *ctx;
- struct thread_struct *t;
- unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
- unsigned long flags;
- u64 psr, psr_up;
- int need_irq_resend;
-
- ctx = PFM_GET_CTX(task);
- if (unlikely(ctx == NULL)) return;
-
- BUG_ON(GET_PMU_OWNER());
-
- t = &task->thread;
- /*
- * possible on unload
- */
- if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return;
-
- /*
- * we always come here with interrupts ALREADY disabled by
- * the scheduler. So we simply need to protect against concurrent
- * access, not CPU concurrency.
- */
- flags = pfm_protect_ctx_ctxsw(ctx);
- psr = pfm_get_psr();
-
- need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
-
- BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
- BUG_ON(psr & IA64_PSR_I);
-
- if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
- struct pt_regs *regs = task_pt_regs(task);
-
- BUG_ON(ctx->ctx_smpl_hdr);
-
- pfm_force_cleanup(ctx, regs);
-
- pfm_unprotect_ctx_ctxsw(ctx, flags);
-
- /*
- * this one (kmalloc'ed) is fine with interrupts disabled
- */
- pfm_context_free(ctx);
-
- return;
- }
-
- /*
- * we restore ALL the debug registers to avoid picking up
- * stale state.
- */
- if (ctx->ctx_fl_using_dbreg) {
- pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
- pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
- }
- /*
- * retrieve saved psr.up
- */
- psr_up = ctx->ctx_saved_psr_up;
-
- /*
- * if we were the last user of the PMU on that CPU,
- * then nothing to do except restore psr
- */
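-	/*
-	 * The activation number is a per-CPU generation counter bumped on
-	 * every reload (INC_ACTIVATION() below), so matching both the CPU
-	 * and the recorded activation proves no other context has touched
-	 * this PMU in the meantime.
-	 */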
- if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
-
- /*
- * retrieve partial reload masks (due to user modifications)
- */
- pmc_mask = ctx->ctx_reload_pmcs[0];
- pmd_mask = ctx->ctx_reload_pmds[0];
-
- } else {
- /*
- * To avoid leaking information to the user level when psr.sp=0,
- * we must reload ALL implemented pmds (even the ones we don't use).
- * In the kernel we only allow PFM_READ_PMDS on registers which
- * we initialized or requested (sampling) so there is no risk there.
- */
- pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
-
- /*
- * ALL accessible PMCs are systematically reloaded, unused registers
- * get their default (from pfm_reset_pmu_state()) values to avoid picking
- * up stale configuration.
- *
- * PMC0 is never in the mask. It is always restored separately.
- */
- pmc_mask = ctx->ctx_all_pmcs[0];
- }
- /*
- * when context is MASKED, we will restore PMC with plm=0
- * and PMD with stale information, but that's ok, nothing
- * will be captured.
- *
- * XXX: optimize here
- */
- if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask);
- if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask);
-
- /*
- * check for pending overflow at the time the state
- * was saved.
- */
- if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
- /*
- * reload pmc0 with the overflow information
- * On McKinley PMU, this will trigger a PMU interrupt
- */
- ia64_set_pmc(0, t->pmcs[0]);
- ia64_srlz_d();
- t->pmcs[0] = 0UL;
-
- /*
- * will replay the PMU interrupt
- */
- if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
-
- pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
- }
-
- /*
- * we just did a reload, so we reset the partial reload fields
- */
- ctx->ctx_reload_pmcs[0] = 0UL;
- ctx->ctx_reload_pmds[0] = 0UL;
-
- SET_LAST_CPU(ctx, smp_processor_id());
-
- /*
- * dump activation value for this PMU
- */
- INC_ACTIVATION();
- /*
- * record current activation for this context
- */
- SET_ACTIVATION(ctx);
-
- /*
- * establish new ownership.
- */
- SET_PMU_OWNER(task, ctx);
-
- /*
- * restore the psr.up bit. measurement
- * is active again.
- * no PMU interrupt can happen at this point
- * because we still have interrupts disabled.
- */
- if (likely(psr_up)) pfm_set_psr_up();
-
- /*
- * allow concurrent access to context
- */
- pfm_unprotect_ctx_ctxsw(ctx, flags);
-}
-#else /* !CONFIG_SMP */
-/*
- * reload PMU state for UP kernels
- * in 2.5 we come here with interrupts disabled
- */
-void
-pfm_load_regs (struct task_struct *task)
-{
- struct thread_struct *t;
- pfm_context_t *ctx;
- struct task_struct *owner;
- unsigned long pmd_mask, pmc_mask;
- u64 psr, psr_up;
- int need_irq_resend;
-
- owner = GET_PMU_OWNER();
- ctx = PFM_GET_CTX(task);
- t = &task->thread;
- psr = pfm_get_psr();
-
- BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
- BUG_ON(psr & IA64_PSR_I);
-
- /*
- * we restore ALL the debug registers to avoid picking up
- * stale state.
- *
- * This must be done even when the task is still the owner
- * as the registers may have been modified via ptrace()
- * (not perfmon) by the previous task.
- */
- if (ctx->ctx_fl_using_dbreg) {
- pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
- pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
- }
-
- /*
-	 * retrieve saved psr.up
- */
- psr_up = ctx->ctx_saved_psr_up;
- need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
-
- /*
- * short path, our state is still there, just
- * need to restore psr and we go
- *
- * we do not touch either PMC nor PMD. the psr is not touched
- * by the overflow_handler. So we are safe w.r.t. to interrupt
- * concurrency even without interrupt masking.
- */
- if (likely(owner == task)) {
- if (likely(psr_up)) pfm_set_psr_up();
- return;
- }
-
- /*
- * someone else is still using the PMU, first push it out and
- * then we'll be able to install our stuff !
- *
- * Upon return, there will be no owner for the current PMU
- */
- if (owner) pfm_lazy_save_regs(owner);
-
- /*
- * To avoid leaking information to the user level when psr.sp=0,
- * we must reload ALL implemented pmds (even the ones we don't use).
- * In the kernel we only allow PFM_READ_PMDS on registers which
- * we initialized or requested (sampling) so there is no risk there.
- */
- pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
-
- /*
- * ALL accessible PMCs are systematically reloaded, unused registers
- * get their default (from pfm_reset_pmu_state()) values to avoid picking
- * up stale configuration.
- *
- * PMC0 is never in the mask. It is always restored separately
- */
- pmc_mask = ctx->ctx_all_pmcs[0];
-
- pfm_restore_pmds(t->pmds, pmd_mask);
- pfm_restore_pmcs(t->pmcs, pmc_mask);
-
- /*
- * check for pending overflow at the time the state
- * was saved.
- */
- if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
- /*
- * reload pmc0 with the overflow information
- * On McKinley PMU, this will trigger a PMU interrupt
- */
- ia64_set_pmc(0, t->pmcs[0]);
- ia64_srlz_d();
-
- t->pmcs[0] = 0UL;
-
- /*
- * will replay the PMU interrupt
- */
- if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
-
- pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
- }
-
- /*
- * establish new ownership.
- */
- SET_PMU_OWNER(task, ctx);
-
- /*
- * restore the psr.up bit. measurement
- * is active again.
- * no PMU interrupt can happen at this point
- * because we still have interrupts disabled.
- */
- if (likely(psr_up)) pfm_set_psr_up();
-}
-#endif /* CONFIG_SMP */
-#endif /* XEN */
-
-/*
- * this function assumes monitoring is stopped
- */
-static void
-pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
-{
-#ifndef XEN
- u64 pmc0;
- unsigned long mask2, val, pmd_val, ovfl_val;
- int i, can_access_pmu = 0;
- int is_self;
-
- /*
- * is the caller the task being monitored (or which initiated the
- * session for system wide measurements)
- */
- is_self = ctx->ctx_task == task ? 1 : 0;
-
- /*
-	 * we can access the PMU if the task is the owner of the PMU state on the current CPU
- * or if we are running on the CPU bound to the context in system-wide mode
- * (that is not necessarily the task the context is attached to in this mode).
- * In system-wide we always have can_access_pmu true because a task running on an
- * invalid processor is flagged earlier in the call stack (see pfm_stop).
- */
- can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
- if (can_access_pmu) {
- /*
- * Mark the PMU as not owned
- * This will cause the interrupt handler to do nothing in case an overflow
- * interrupt was in-flight
- * This also guarantees that pmc0 will contain the final state
- * It virtually gives us full control on overflow processing from that point
- * on.
- */
- SET_PMU_OWNER(NULL, NULL);
- DPRINT(("releasing ownership\n"));
-
- /*
- * read current overflow status:
- *
- * we are guaranteed to read the final stable state
- */
- ia64_srlz_d();
- pmc0 = ia64_get_pmc(0); /* slow */
-
- /*
- * reset freeze bit, overflow status information destroyed
- */
- pfm_unfreeze_pmu();
- } else {
- pmc0 = task->thread.pmcs[0];
- /*
- * clear whatever overflow status bits there were
- */
- task->thread.pmcs[0] = 0;
- }
- ovfl_val = pmu_conf->ovfl_val;
- /*
- * we save all the used pmds
- * we take care of overflows for counting PMDs
- *
- * XXX: sampling situation is not taken into account here
- */
- mask2 = ctx->ctx_used_pmds[0];
-
- DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
-
- for (i = 0; mask2; i++, mask2>>=1) {
-
- /* skip non used pmds */
- if ((mask2 & 0x1) == 0) continue;
-
- /*
- * can access PMU always true in system wide mode
- */
- val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i];
-
- if (PMD_IS_COUNTING(i)) {
- DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
- task->pid,
- i,
- ctx->ctx_pmds[i].val,
- val & ovfl_val));
-
- /*
- * we rebuild the full 64 bit value of the counter
- */
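-			/*
-			 * (e.g. with a 32-bit hardware counter, ovfl_val =
-			 * 0xffffffff, the upper bits maintained in software in
-			 * ctx_pmds[i].val are combined with the live low bits
-			 * read from the PMD above)
-			 */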
- val = ctx->ctx_pmds[i].val + (val & ovfl_val);
-
- /*
- * now everything is in ctx_pmds[] and we need
- * to clear the saved context from save_regs() such that
- * pfm_read_pmds() gets the correct value
- */
- pmd_val = 0UL;
-
- /*
- * take care of overflow inline
- */
- if (pmc0 & (1UL << i)) {
- val += 1 + ovfl_val;
- DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
- }
- }
-
- DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
-
- if (is_self) task->thread.pmds[i] = pmd_val;
-
- ctx->ctx_pmds[i].val = val;
- }
-#else
- /* XXX */
-#endif
-}
-
-static struct irqaction __read_mostly perfmon_irqaction = {
- .handler = pfm_interrupt_handler,
-#ifndef XEN
- .flags = SA_INTERRUPT,
-#endif
- .name = "perfmon"
-};
-
-#ifndef XEN
-static void
-pfm_alt_save_pmu_state(void *data)
-{
- struct pt_regs *regs;
-
- regs = task_pt_regs(current);
-
- DPRINT(("called\n"));
-
- /*
- * should not be necessary but
-	 * let's take no risk
- */
- pfm_clear_psr_up();
- pfm_clear_psr_pp();
- ia64_psr(regs)->pp = 0;
-
- /*
- * This call is required
- * May cause a spurious interrupt on some processors
- */
- pfm_freeze_pmu();
-
- ia64_srlz_d();
-}
-
-void
-pfm_alt_restore_pmu_state(void *data)
-{
- struct pt_regs *regs;
-
- regs = task_pt_regs(current);
-
- DPRINT(("called\n"));
-
- /*
- * put PMU back in state expected
- * by perfmon
- */
- pfm_clear_psr_up();
- pfm_clear_psr_pp();
- ia64_psr(regs)->pp = 0;
-
- /*
- * perfmon runs with PMU unfrozen at all times
- */
- pfm_unfreeze_pmu();
-
- ia64_srlz_d();
-}
-
-int
-pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
-{
- int ret, i;
- int reserve_cpu;
-
- /* some sanity checks */
- if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
-
- /* do the easy test first */
- if (pfm_alt_intr_handler) return -EBUSY;
-
- /* one at a time in the install or remove, just fail the others */
- if (!spin_trylock(&pfm_alt_install_check)) {
- return -EBUSY;
- }
-
- /* reserve our session */
- for_each_online_cpu(reserve_cpu) {
- ret = pfm_reserve_session(NULL, 1, reserve_cpu);
- if (ret) goto cleanup_reserve;
- }
-
- /* save the current system wide pmu states */
- on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
-
- /* officially change to the alternate interrupt handler */
- pfm_alt_intr_handler = hdl;
-
- spin_unlock(&pfm_alt_install_check);
-
- return 0;
-
-cleanup_reserve:
- for_each_online_cpu(i) {
- /* don't unreserve more than we reserved */
- if (i >= reserve_cpu) break;
-
- pfm_unreserve_session(NULL, 1, i);
- }
-
- spin_unlock(&pfm_alt_install_check);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
-
-int
-pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
-{
- int i;
-
- if (hdl == NULL) return -EINVAL;
-
- /* cannot remove someone else's handler! */
- if (pfm_alt_intr_handler != hdl) return -EINVAL;
-
- /* one at a time in the install or remove, just fail the others */
- if (!spin_trylock(&pfm_alt_install_check)) {
- return -EBUSY;
- }
-
- pfm_alt_intr_handler = NULL;
-
- on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
-
- for_each_online_cpu(i) {
- pfm_unreserve_session(NULL, 1, i);
- }
-
- spin_unlock(&pfm_alt_install_check);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
-#endif
-
-/*
- * perfmon initialization routine, called from the initcall() table
- */
-#ifndef XEN
-static int init_pfm_fs(void);
-#else
-#define init_pfm_fs() do {} while(0)
-#endif
-
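-/*
- * Select the PMU description table: a table with a probe() hook wins if the
- * probe succeeds; otherwise the first table whose pmu_family matches the
- * local CPU is used (0xff acts as a family wildcard).
- */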
-static int __init
-pfm_probe_pmu(void)
-{
- pmu_config_t **p;
- int family;
-
- family = local_cpu_data->family;
- p = pmu_confs;
-
- while(*p) {
- if ((*p)->probe) {
- if ((*p)->probe() == 0) goto found;
- } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
- goto found;
- }
- p++;
- }
- return -1;
-found:
- pmu_conf = *p;
- return 0;
-}
-
-#ifndef XEN
-static struct file_operations pfm_proc_fops = {
- .open = pfm_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-#endif
-
-int __init
-pfm_init(void)
-{
- unsigned int n, n_counters, i;
-
- printk("perfmon: version %u.%u IRQ %u\n",
- PFM_VERSION_MAJ,
- PFM_VERSION_MIN,
- IA64_PERFMON_VECTOR);
-
- if (pfm_probe_pmu()) {
- printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
- local_cpu_data->family);
- return -ENODEV;
- }
-
- /*
- * compute the number of implemented PMD/PMC from the
- * description tables
- */
- n = 0;
- for (i=0; PMC_IS_LAST(i) == 0; i++) {
- if (PMC_IS_IMPL(i) == 0) continue;
- pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
- n++;
- }
- pmu_conf->num_pmcs = n;
-
- n = 0; n_counters = 0;
- for (i=0; PMD_IS_LAST(i) == 0; i++) {
- if (PMD_IS_IMPL(i) == 0) continue;
- pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
- n++;
- if (PMD_IS_COUNTING(i)) n_counters++;
- }
- pmu_conf->num_pmds = n;
- pmu_conf->num_counters = n_counters;
-
- /*
- * sanity checks on the number of debug registers
- */
- if (pmu_conf->use_rr_dbregs) {
- if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
- printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
- pmu_conf = NULL;
- return -1;
- }
- if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
-			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
- pmu_conf = NULL;
- return -1;
- }
- }
-
- printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
- pmu_conf->pmu_name,
- pmu_conf->num_pmcs,
- pmu_conf->num_pmds,
- pmu_conf->num_counters,
- ffz(pmu_conf->ovfl_val));
-
- /* sanity check */
- if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) {
- printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
- pmu_conf = NULL;
- return -1;
- }
-
-#ifndef XEN
- /*
- * create /proc/perfmon (mostly for debugging purposes)
- */
- perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
- if (perfmon_dir == NULL) {
- printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
- pmu_conf = NULL;
- return -1;
- }
- /*
- * install customized file operations for /proc/perfmon entry
- */
- perfmon_dir->proc_fops = &pfm_proc_fops;
-
- /*
- * create /proc/sys/kernel/perfmon (for debugging purposes)
- */
- pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);
-#endif
-
- /*
- * initialize all our spinlocks
- */
- spin_lock_init(&pfm_sessions.pfs_lock);
- spin_lock_init(&pfm_buffer_fmt_lock);
-
- init_pfm_fs();
-
- for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
-
- return 0;
-}
-
-__initcall(pfm_init);
-
-/*
- * this function is called before pfm_init()
- */
-void
-pfm_init_percpu (void)
-{
- /*
- * make sure no measurement is active
- * (may inherit programmed PMCs from EFI).
- */
- pfm_clear_psr_pp();
- pfm_clear_psr_up();
-
- /*
- * we run with the PMU not frozen at all times
- */
- pfm_unfreeze_pmu();
-
- if (smp_processor_id() == 0)
- register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
-
- ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
- ia64_srlz_d();
-}
-
-/*
- * used for debug purposes only
- */
-void
-dump_pmu_state(const char *from)
-{
- struct task_struct *task;
- struct thread_struct *t;
- struct pt_regs *regs;
- pfm_context_t *ctx;
- unsigned long psr, dcr, info, flags;
- int i, this_cpu;
-
- local_irq_save(flags);
-
- this_cpu = smp_processor_id();
- regs = task_pt_regs(current);
- info = PFM_CPUINFO_GET();
- dcr = ia64_getreg(_IA64_REG_CR_DCR);
-
- if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
- local_irq_restore(flags);
- return;
- }
-
-#ifndef XEN
- printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
- this_cpu,
- from,
- current->pid,
- regs->cr_iip,
- current->comm);
-#endif
-
- task = GET_PMU_OWNER();
- ctx = GET_PMU_CTX();
-
- printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);
-
- psr = pfm_get_psr();
-
- printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
- this_cpu,
- ia64_get_pmc(0),
- psr & IA64_PSR_PP ? 1 : 0,
- psr & IA64_PSR_UP ? 1 : 0,
- dcr & IA64_DCR_PP ? 1 : 0,
- info,
- ia64_psr(regs)->up,
- ia64_psr(regs)->pp);
-
- ia64_psr(regs)->up = 0;
- ia64_psr(regs)->pp = 0;
-
- t = &current->thread;
-
- for (i=1; PMC_IS_LAST(i) == 0; i++) {
- if (PMC_IS_IMPL(i) == 0) continue;
- printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
- }
-
- for (i=1; PMD_IS_LAST(i) == 0; i++) {
- if (PMD_IS_IMPL(i) == 0) continue;
- printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
- }
-
- if (ctx) {
-#ifndef XEN
-	printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
- this_cpu,
- ctx->ctx_state,
- ctx->ctx_smpl_vaddr,
- ctx->ctx_smpl_hdr,
- ctx->ctx_msgq_head,
- ctx->ctx_msgq_tail,
- ctx->ctx_saved_psr_up);
-#else
- printk("->CPU%d ctx_state=%d vaddr=%p addr=%p saved_psr_up=0x%lx\n",
- this_cpu,
- ctx->ctx_state,
- ctx->ctx_smpl_vaddr,
- ctx->ctx_smpl_hdr,
- ctx->ctx_saved_psr_up);
-#endif
- }
- local_irq_restore(flags);
-}
-
-#ifndef XEN
-/*
- * called from process.c:copy_thread(). task is new child.
- */
-void
-pfm_inherit(struct task_struct *task, struct pt_regs *regs)
-{
- struct thread_struct *thread;
-
- DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));
-
- thread = &task->thread;
-
- /*
- * cut links inherited from parent (current)
- */
- thread->pfm_context = NULL;
-
- PFM_SET_WORK_PENDING(task, 0);
-
- /*
- * the psr bits are already set properly in copy_threads()
- */
-}
-#endif
-#else /* !CONFIG_PERFMON */
-asmlinkage long
-sys_perfmonctl (int fd, int cmd, void *arg, int count)
-{
- return -ENOSYS;
-}
-#endif /* CONFIG_PERFMON */
-
-
-#ifdef XEN
-static int xenpfm_context_unload(void);
-static int xenpfm_start_stop_locked(int is_start);
-DEFINE_PER_CPU(pfm_context_t*, xenpfm_context);
-
-/*
- * note: some functions mask interrupts while holding this lock,
- * so this lock must never be acquired from an interrupt handler.
- */
-DEFINE_SPINLOCK(xenpfm_context_lock);
-
-static int
-xenpfm_get_features(XEN_GUEST_HANDLE(pfarg_features_t) req)
-{
- pfarg_features_t res;
- if (guest_handle_is_null(req))
- return -EFAULT;
-
- memset(&res, 0, sizeof(res));
- pfm_get_features(NULL, &res, 0, NULL);
- if (copy_to_guest(req, &res, 1))
- return -EFAULT;
- return 0;
-}
-
-static int
-xenpfm_pfarg_is_sane(pfarg_context_t* pfx)
-{
- int error;
- int ctx_flags;
-
- error = pfarg_is_sane(NULL, pfx);
- if (error)
- return error;
-
- ctx_flags = pfx->ctx_flags;
- if (!(ctx_flags & PFM_FL_SYSTEM_WIDE) ||
- ctx_flags & PFM_FL_NOTIFY_BLOCK ||
- ctx_flags & PFM_FL_OVFL_NO_MSG)
- return -EINVAL;
-
- /* probably more to add here */
-
- return 0;
-}
-
-static int
-xenpfm_context_create(XEN_GUEST_HANDLE(pfarg_context_t) req)
-{
- int error;
- pfarg_context_t kreq;
-
- int cpu;
- pfm_context_t* ctx[NR_CPUS] = {[0 ... (NR_CPUS - 1)] = NULL};
-
- if (copy_from_guest(&kreq, req, 1)) {
- error = -EINVAL;
- goto out;
- }
-
- error = xenpfm_pfarg_is_sane(&kreq);
- if (error)
- goto out;
-
- /* XXX fmt */
- for_each_possible_cpu(cpu) {
- ctx[cpu] = pfm_context_create(&kreq);
- if (ctx[cpu] == NULL) {
- error = -ENOMEM;
- break;
- }
- }
- if (error)
- goto out;
-
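-	/*
-	 * Publish the pre-allocated contexts under the lock: either every
-	 * CPU gets one, or (on -EBUSY) none are installed and the leftovers
-	 * are freed in the out: path below.
-	 */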
- BUG_ON(in_irq());
- spin_lock(&xenpfm_context_lock);
- for_each_possible_cpu(cpu) {
- if (per_cpu(xenpfm_context, cpu) != NULL) {
- error = -EBUSY;
- break;
- }
- }
-	if (!error) {
-		for_each_possible_cpu(cpu) {
-			per_cpu(xenpfm_context, cpu) = ctx[cpu];
-			ctx[cpu] = NULL;
-		}
-	}
- spin_unlock(&xenpfm_context_lock);
-
-out:
- for_each_possible_cpu(cpu) {
- if (ctx[cpu] != NULL)
- pfm_context_free(ctx[cpu]);
- }
- return error;
-}
-
-static int
-xenpfm_context_destroy(void)
-{
- int cpu;
- pfm_context_t* ctx;
- unsigned long flags;
- unsigned long need_unload;
- int error = 0;
-
-again:
- need_unload = 0;
- BUG_ON(in_irq());
- spin_lock_irqsave(&xenpfm_context_lock, flags);
- for_each_possible_cpu(cpu) {
- ctx = per_cpu(xenpfm_context, cpu);
- if (ctx == NULL) {
- error = -EINVAL;
- break;
- }
- PROTECT_CTX_NOIRQ(ctx);
- if (ctx->ctx_state != PFM_CTX_UNLOADED)
- need_unload = 1;
- }
- if (error) {
- for_each_possible_cpu(cpu) {
- ctx = per_cpu(xenpfm_context, cpu);
- if (ctx == NULL)
- break;
- UNPROTECT_CTX_NOIRQ(per_cpu(xenpfm_context, cpu));
- }
- goto out;
- }
- if (need_unload) {
- for_each_possible_cpu(cpu)
- UNPROTECT_CTX_NOIRQ(per_cpu(xenpfm_context, cpu));
- spin_unlock_irqrestore(&xenpfm_context_lock, flags);
-
- error = xenpfm_context_unload();
- if (error)
- return error;
- goto again;
- }
-
- for_each_possible_cpu(cpu) {
- pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
- per_cpu(xenpfm_context, cpu) = NULL;
-
- /* pfm_close() unlocks spinlock and free the context. */
- error |= pfm_close(ctx);
- }
-out:
- spin_unlock_irqrestore(&xenpfm_context_lock, flags);
- return error;
-}
-
-static int
-xenpfm_write_pmcs(XEN_GUEST_HANDLE(pfarg_reg_t) req, unsigned long count)
-{
- unsigned long i;
- int error = 0;
- unsigned long flags;
-
- for (i = 0; i < count; i++) {
- pfarg_reg_t kreq;
- int cpu;
- if (copy_from_guest_offset(&kreq, req, i, 1)) {
- error = -EFAULT;
- goto out;
- }
- BUG_ON(in_irq());
- spin_lock_irqsave(&xenpfm_context_lock, flags);
- for_each_online_cpu(cpu) {
- pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
- BUG_ON(ctx == NULL);
- PROTECT_CTX_NOIRQ(ctx);
- error |= pfm_write_pmcs(ctx, (void *)&kreq, 1, NULL);
- UNPROTECT_CTX_NOIRQ(ctx);
- }
- spin_unlock_irqrestore(&xenpfm_context_lock, flags);
- }
-
-	/* XXX if the context is loaded, update all physical CPUs' PMCs. */
-	/* Currently this results in an error. */
-out:
- return error;
-}
-
-static int
-xenpfm_write_pmds(XEN_GUEST_HANDLE(pfarg_reg_t) req, unsigned long count)
-{
- unsigned long i;
- int error = 0;
-
- for (i = 0; i < count; i++) {
- pfarg_reg_t kreq;
- int cpu;
- unsigned long flags;
- if (copy_from_guest_offset(&kreq, req, i, 1)) {
- error = -EFAULT;
- goto out;
- }
- BUG_ON(in_irq());
- spin_lock_irqsave(&xenpfm_context_lock, flags);
- for_each_online_cpu(cpu) {
- pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
- BUG_ON(ctx == NULL);
- PROTECT_CTX_NOIRQ(ctx);
- error |= pfm_write_pmds(ctx, &kreq, 1, NULL);
- UNPROTECT_CTX_NOIRQ(ctx);
- }
- spin_unlock_irqrestore(&xenpfm_context_lock, flags);
- }
-
-	/* XXX if the context is loaded, update all physical CPUs' PMDs. */
-	/* Currently this results in an error. */
-out:
- return error;
-}
-
-struct xenpfm_context_load_arg {
- pfarg_load_t* req;
- int error[NR_CPUS];
-};
-
-static void
-xenpfm_context_load_cpu(void* info)
-{
- unsigned long flags;
- struct xenpfm_context_load_arg* arg = (struct xenpfm_context_load_arg*)info;
- pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
-
- BUG_ON(ctx == NULL);
- PROTECT_CTX(ctx, flags);
- arg->error[smp_processor_id()] = pfm_context_load(ctx, arg->req, 0, NULL);
- UNPROTECT_CTX(ctx, flags);
-}
-
-static int
-xenpfm_context_load(XEN_GUEST_HANDLE(pfarg_load_t) req)
-{
- pfarg_load_t kreq;
- int cpu;
- struct xenpfm_context_load_arg arg;
- int error = 0;
-
- if (copy_from_guest(&kreq, req, 1))
- return -EFAULT;
-
- arg.req = &kreq;
- for_each_online_cpu(cpu)
- arg.error[cpu] = 0;
-
- BUG_ON(in_irq());
- spin_lock(&xenpfm_context_lock);
- smp_call_function(&xenpfm_context_load_cpu, &arg, 1);
- xenpfm_context_load_cpu(&arg);
- spin_unlock(&xenpfm_context_lock);
- for_each_online_cpu(cpu) {
- if (arg.error[cpu]) {
- gdprintk(XENLOG_INFO, "%s: cpu %d error %d\n",
- __func__, cpu, arg.error[cpu]);
- error = arg.error[cpu];
- }
- }
-	return error;
-}
-
-
-struct xenpfm_context_unload_arg {
- int error[NR_CPUS];
-};
-
-static void
-xenpfm_context_unload_cpu(void* info)
-{
- unsigned long flags;
- struct xenpfm_context_unload_arg* arg = (struct xenpfm_context_unload_arg*)info;
- pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
- BUG_ON(ctx == NULL);
- PROTECT_CTX(ctx, flags);
- arg->error[smp_processor_id()] = pfm_context_unload(ctx, NULL, 0, NULL);
- UNPROTECT_CTX(ctx, flags);
-}
-
-static int
-xenpfm_context_unload(void)
-{
- int cpu;
- struct xenpfm_context_unload_arg arg;
- unsigned long flags;
- int error = 0;
-
- for_each_online_cpu(cpu)
- arg.error[cpu] = 0;
-
- BUG_ON(in_irq());
- local_irq_save(flags);
- if (!spin_trylock(&xenpfm_context_lock)) {
- local_irq_restore(flags);
- return -EAGAIN;
- }
- error = xenpfm_start_stop_locked(0);
- local_irq_restore(flags);
- if (error) {
- spin_unlock(&xenpfm_context_lock);
- return error;
- }
-
- smp_call_function(&xenpfm_context_unload_cpu, &arg, 1);
- xenpfm_context_unload_cpu(&arg);
- spin_unlock(&xenpfm_context_lock);
- for_each_online_cpu(cpu) {
- if (arg.error[cpu]) {
- gdprintk(XENLOG_INFO, "%s: cpu %d error %d\n",
- __func__, cpu, arg.error[cpu]);
- error = arg.error[cpu];
- }
- }
- return error;
-}
-
-static int
-__xenpfm_start(void)
-{
- pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
- int state;
- int error = 0;
-
- BUG_ON(ctx == NULL);
- BUG_ON(local_irq_is_enabled());
- PROTECT_CTX_NOIRQ(ctx);
- state = ctx->ctx_state;
- if (state != PFM_CTX_LOADED) {
- gdprintk(XENLOG_DEBUG, "%s state %d\n", __func__, state);
- error = -EINVAL;
- goto out;
- }
-
- /* now update the local PMU and cpuinfo */
- PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
-
-	/* start monitoring at kernel level: psr.pp */
- pfm_set_psr_pp();
-
-	/* start monitoring at kernel level: psr.up */
- pfm_set_psr_up();
-
- /* enable dcr pp */
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
- ia64_srlz_i();
-out:
- UNPROTECT_CTX_NOIRQ(ctx);
- return error;
-}
-
-static int
-__xenpfm_stop(void)
-{
- pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
- int state;
- int error = 0;
-
- BUG_ON(local_irq_is_enabled());
- if (ctx == NULL) {
- gdprintk(XENLOG_DEBUG, "%s ctx=NULL p:%2d v:%2d\n",
- __func__, smp_processor_id(), current->vcpu_id);
- return 0;
- }
-
- PROTECT_CTX_NOIRQ(ctx);
- state = ctx->ctx_state;
- if (state != PFM_CTX_LOADED) {
- gdprintk(XENLOG_DEBUG, "%s state %d p:%2d v:%2d\n",
- __func__, state,
- smp_processor_id(), current->vcpu_id);
- error = -EINVAL;
- goto out;
- }
-
- /*
- * Update local PMU first
- *
- * disable dcr pp
- */
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
- ia64_srlz_i();
-
- /* update local cpuinfo */
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
-
- /* stop monitoring, does srlz.i */
- pfm_clear_psr_pp();
-
- /* stop monitoring at kernel level */
- pfm_clear_psr_up();
-out:
- UNPROTECT_CTX_NOIRQ(ctx);
- return error;
-}
-
-int
-__xenpfm_start_stop(int is_start)
-{
- if (is_start)
- return __xenpfm_start();
- else
- return __xenpfm_stop();
-}
-
-struct xenpfm_start_arg {
- int is_start;
- atomic_t started;
- atomic_t finished;
- int error[NR_CPUS];
-};
-
-static void
-xenpfm_start_stop_cpu(void* info)
-{
- unsigned long flags;
- struct xenpfm_start_arg* arg = (struct xenpfm_start_arg*)info;
-
- local_irq_save(flags);
- atomic_inc(&arg->started);
- while (!atomic_read(&arg->finished))
- cpu_relax();
-
- arg->error[smp_processor_id()] = __xenpfm_start_stop(arg->is_start);
-
- atomic_inc(&arg->finished);
- local_irq_restore(flags);
-}
-
-static void
-xenpfm_start_stop_vcpu(struct vcpu* v, int is_start)
-{
- struct pt_regs *regs = vcpu_regs(v);
-
- if (is_start) {
- /* set user level psr.pp for the caller */
- ia64_psr(regs)->pp = 1;
-
- /* activate monitoring at user level */
- ia64_psr(regs)->up = 1;
-
- /* don't allow user level control */
- ia64_psr(regs)->sp = 1;
- } else {
- /*
- * stop monitoring in the caller
- */
- ia64_psr(regs)->pp = 0;
-
- /*
- * stop monitoring at the user level
- */
- ia64_psr(regs)->up = 0;
-
-#if 0
- /*
- * cancel user level control
- */
- ia64_psr(regs)->sp = 0;
-#endif
- }
-}
-
-/*
- * This is the trickiest part.
- * Here we want to enable/disable wide performance monitor including
- * all xen context and all guest.
- * For interrupt context and running vcpu, set dcr.pp = 1
- * For blocked vcpu and idle vcpu, set psr.pp = 1 using timer via softirq.
- * (IPI doesn't work here because psr is not preserved across interruption
- * for VTi domains.
- * If IPI were used, we would need to unwind the stack to the interrupt frame
- * and set cr_ipsr.pp = 1; using a timer via do_softirq() is easier.)
- * For guest set all vcpu_regs(v)->cr_ipsr.pp = 1.
- */
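-/*
- * Rendezvous sketch: the caller arms one timer per remote CPU; each timer
- * handler checks in (arg->started) and spins with interrupts off until
- * arg->finished becomes non-zero. Once every CPU has checked in, the
- * caller flips the psr bits of all vcpus, applies the change to its own
- * PMU and bumps arg->finished, releasing the remote CPUs to update their
- * local PMUs in turn.
- */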
-static int
-xenpfm_start_stop_locked(int is_start)
-{
-	/* avoid stack overflow; protected by xenpfm_context_lock */
- static struct timer xenpfm_timer[NR_CPUS];
-
- struct xenpfm_start_arg arg;
- int cpus = num_online_cpus();
- int cpu;
- struct domain* d;
- struct vcpu* v;
- int error = 0;
-
- arg.is_start = is_start;
- atomic_set(&arg.started, 1); /* 1 for this cpu */
- atomic_set(&arg.finished, 0);
- for_each_possible_cpu(cpu)
- arg.error[cpu] = 0;
-
- BUG_ON(!spin_is_locked(&xenpfm_context_lock));
- for_each_online_cpu(cpu) {
- struct timer* start_stop_timer = &xenpfm_timer[cpu];
- if (cpu == smp_processor_id())
- continue;
- init_timer(start_stop_timer, &xenpfm_start_stop_cpu,
- &arg, cpu);
- set_timer(start_stop_timer, 0);/* fire it ASAP */
- }
-
- while (atomic_read(&arg.started) != cpus)
- cpu_relax();
-
- rcu_read_lock(&domlist_read_lock);
- for_each_domain(d)
- for_each_vcpu(d, v)
- xenpfm_start_stop_vcpu(v, is_start);
- rcu_read_unlock(&domlist_read_lock);
-
- arg.error[smp_processor_id()] = __xenpfm_start_stop(is_start);
- atomic_inc(&arg.finished);
-
- while (atomic_read(&arg.finished) != cpus)
- cpu_relax();
-
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
-		/* xenpfm_timer[] is global, so we have to wait for the
-		 * Xen timer subsystem to finish with each timer. */
- kill_timer(&xenpfm_timer[cpu]);
- if (arg.error[cpu]) {
- gdprintk(XENLOG_INFO, "%s: cpu %d error %d\n",
- __func__, cpu, arg.error[cpu]);
- error = arg.error[cpu];
- }
- }
- return error;
-}
-
-static int
-xenpfm_start_stop(int is_start)
-{
- unsigned long flags;
- int error;
-
- BUG_ON(in_irq());
- local_irq_save(flags);
- /*
-	 * Avoid deadlock. At this moment Xen has only spin locks and
-	 * does not have a blocking mutex.
- */
- if (!spin_trylock(&xenpfm_context_lock)) {
- local_irq_restore(flags);
- gdprintk(XENLOG_DEBUG, "%s EAGAIN\n", __func__);
- return -EAGAIN;
- }
- error = xenpfm_start_stop_locked(is_start);
- spin_unlock_irqrestore(&xenpfm_context_lock, flags);
-
- return error;
-}
-
-#define NONPRIV_OP(cmd) (((cmd) == PFM_GET_FEATURES))
-
-int
-do_perfmon_op(unsigned long cmd,
- XEN_GUEST_HANDLE(void) arg1, unsigned long count)
-{
- unsigned long error = 0;
-
-	if (!NONPRIV_OP(cmd) && current->domain->domain_id != 0) {
- gdprintk(XENLOG_INFO, "xen perfmon: "
- "dom %d denied privileged operation %ld\n",
- current->domain->domain_id, cmd);
- return -EPERM;
- }
- switch (cmd) {
- case PFM_GET_FEATURES:
- error = xenpfm_get_features(guest_handle_cast(arg1, pfarg_features_t));
- break;
-
- case PFM_CREATE_CONTEXT:
- error = xenpfm_context_create(guest_handle_cast(arg1, pfarg_context_t));
- break;
- case PFM_DESTROY_CONTEXT:
- error = xenpfm_context_destroy();
- break;
-
- case PFM_WRITE_PMCS:
- error = xenpfm_write_pmcs(guest_handle_cast(arg1, pfarg_reg_t), count);
- break;
- case PFM_WRITE_PMDS:
- error = xenpfm_write_pmds(guest_handle_cast(arg1, pfarg_reg_t), count);
- break;
- case PFM_READ_PMDS:
- error = -ENOSYS;
- break;
- case PFM_GET_PMC_RESET_VAL:
- error = -ENOSYS;
- break;
-
- case PFM_LOAD_CONTEXT:
- error = xenpfm_context_load(guest_handle_cast(arg1, pfarg_load_t));
- break;
- case PFM_UNLOAD_CONTEXT:
- error = xenpfm_context_unload();
- break;
-
- case PFM_START:
- error = xenpfm_start_stop(1);
- break;
- case PFM_STOP:
- error = xenpfm_start_stop(0);
- break;
- case PFM_RESTART:
- error = -ENOSYS;
- break;
-
- case PFM_DEBUG:
- error = -ENOSYS;
- break;
-
- case PFM_ENABLE:
- case PFM_DISABLE:
- case PFM_PROTECT_CONTEXT:
- case PFM_UNPROTECT_CONTEXT:
- default:
- error = -EINVAL;
- break;
- }
- return error;
-}
-#endif
diff --git a/xen/arch/ia64/linux-xen/perfmon_default_smpl.c b/xen/arch/ia64/linux-xen/perfmon_default_smpl.c
deleted file mode 100644
index f10a5a6f0a..0000000000
--- a/xen/arch/ia64/linux-xen/perfmon_default_smpl.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * This file implements the default sampling buffer format
- * for the Linux/ia64 perfmon-2 subsystem.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/config.h>
-#include <linux/init.h>
-#include <asm/delay.h>
-#include <linux/smp.h>
-
-#include <asm/perfmon.h>
-#include <asm/perfmon_default_smpl.h>
-
-#ifndef XEN
-MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
-MODULE_DESCRIPTION("perfmon default sampling format");
-MODULE_LICENSE("GPL");
-#endif
-
-#ifdef XEN
-#define pid vcpu_id
-#endif
-
-#define DEFAULT_DEBUG 1
-
-#ifdef DEFAULT_DEBUG
-#define DPRINT(a) \
- do { \
- if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
- } while (0)
-
-#define DPRINT_ovfl(a) \
- do { \
- if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
- } while (0)
-
-#else
-#define DPRINT(a)
-#define DPRINT_ovfl(a)
-#endif
-
-static int
-default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
-{
- pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t*)data;
- int ret = 0;
-
- if (data == NULL) {
- DPRINT(("[%d] no argument passed\n", task->pid));
- return -EINVAL;
- }
-
- DPRINT(("[%d] validate flags=0x%x CPU%d\n", task->pid, flags, cpu));
-
- /*
- * must hold at least the buffer header + one minimally sized entry
- */
- if (arg->buf_size < PFM_DEFAULT_SMPL_MIN_BUF_SIZE) return -EINVAL;
-
- DPRINT(("buf_size=%lu\n", arg->buf_size));
-
- return ret;
-}
-
-static int
-default_get_size(struct task_struct *task, unsigned int flags, int cpu, void *data, unsigned long *size)
-{
- pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t *)data;
-
- /*
- * size has been validated in default_validate
- */
- *size = arg->buf_size;
-
- return 0;
-}
-
-static int
-default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *data)
-{
- pfm_default_smpl_hdr_t *hdr;
- pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t *)data;
-
- hdr = (pfm_default_smpl_hdr_t *)buf;
-
- hdr->hdr_version = PFM_DEFAULT_SMPL_VERSION;
- hdr->hdr_buf_size = arg->buf_size;
- hdr->hdr_cur_offs = sizeof(*hdr);
- hdr->hdr_overflows = 0UL;
- hdr->hdr_count = 0UL;
-
- DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u cur_offs=%lu\n",
- task->pid,
- buf,
- hdr->hdr_buf_size,
- sizeof(*hdr),
- hdr->hdr_version,
- hdr->hdr_cur_offs));
-
- return 0;
-}
-
-static int
-default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp)
-{
- pfm_default_smpl_hdr_t *hdr;
- pfm_default_smpl_entry_t *ent;
- void *cur, *last;
- unsigned long *e, entry_size;
- unsigned int npmds, i;
- unsigned char ovfl_pmd;
- unsigned char ovfl_notify;
-
-	if (unlikely(buf == NULL || arg == NULL || regs == NULL || task == NULL)) {
- DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg));
- return -EINVAL;
- }
-
- hdr = (pfm_default_smpl_hdr_t *)buf;
- cur = buf+hdr->hdr_cur_offs;
- last = buf+hdr->hdr_buf_size;
- ovfl_pmd = arg->ovfl_pmd;
- ovfl_notify = arg->ovfl_notify;
-
- /*
- * precheck for sanity
- */
- if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full;
-
- npmds = hweight64(arg->smpl_pmds[0]);
-
- ent = (pfm_default_smpl_entry_t *)cur;
-
- prefetch(arg->smpl_pmds_values);
-
- entry_size = sizeof(*ent) + (npmds << 3);
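-	/* npmds << 3 == npmds * sizeof(u64): each sampled PMD is stored as
-	 * one 64-bit word right after the fixed-size entry header */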
-
- /* position for first pmd */
- e = (unsigned long *)(ent+1);
-
- hdr->hdr_count++;
-
- DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmd=%d ovfl_notify=%d npmds=%u\n",
- task->pid,
- hdr->hdr_count,
- cur, last,
- last-cur,
- ovfl_pmd,
- ovfl_notify, npmds));
-
- /*
- * current = task running at the time of the overflow.
- *
- * per-task mode:
-	 *   - this is usually the task being monitored.
- * Under certain conditions, it might be a different task
- *
- * system-wide:
- * - this is not necessarily the task controlling the session
- */
-#ifndef XEN
- ent->pid = current->pid;
-#endif
- ent->ovfl_pmd = ovfl_pmd;
- ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
-
- /*
- * where did the fault happen (includes slot number)
- */
- ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3);
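-	/* cr_ipsr bits 41-42 hold psr.ri, the instruction slot within the
-	 * 16-byte bundle; folding them into the (always zero) low bits of
-	 * cr_iip yields a slot-accurate fault address */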
-
- ent->tstamp = stamp;
- ent->cpu = smp_processor_id();
- ent->set = arg->active_set;
-#ifndef XEN
- ent->tgid = current->tgid;
-#endif
-
- /*
- * selectively store PMDs in increasing index number
- */
- if (npmds) {
- unsigned long *val = arg->smpl_pmds_values;
- for(i=0; i < npmds; i++) {
- *e++ = *val++;
- }
- }
-
- /*
- * update position for next entry
- */
- hdr->hdr_cur_offs += entry_size;
- cur += entry_size;
-
- /*
- * post check to avoid losing the last sample
- */
- if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full;
-
- /*
- * keep same ovfl_pmds, ovfl_notify
- */
- arg->ovfl_ctrl.bits.notify_user = 0;
- arg->ovfl_ctrl.bits.block_task = 0;
- arg->ovfl_ctrl.bits.mask_monitoring = 0;
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1; /* reset before returning from interrupt handler */
-
- return 0;
-full:
- DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=%d\n", last-cur, hdr->hdr_count, ovfl_notify));
-
- /*
-	 * increment the number of buffer overflows.
-	 * important to detect duplicate sets of samples.
- */
- hdr->hdr_overflows++;
-
- /*
- * if no notification requested, then we saturate the buffer
- */
- if (ovfl_notify == 0) {
- arg->ovfl_ctrl.bits.notify_user = 0;
- arg->ovfl_ctrl.bits.block_task = 0;
- arg->ovfl_ctrl.bits.mask_monitoring = 1;
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0;
- } else {
- arg->ovfl_ctrl.bits.notify_user = 1;
- arg->ovfl_ctrl.bits.block_task = 1; /* ignored for non-blocking context */
- arg->ovfl_ctrl.bits.mask_monitoring = 1;
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0; /* no reset now */
- }
- return -1; /* we are full, sorry */
-}
-
-static int
-default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
-{
- pfm_default_smpl_hdr_t *hdr;
-
- hdr = (pfm_default_smpl_hdr_t *)buf;
-
- hdr->hdr_count = 0UL;
- hdr->hdr_cur_offs = sizeof(*hdr);
-
- ctrl->bits.mask_monitoring = 0;
- ctrl->bits.reset_ovfl_pmds = 1; /* uses long-reset values */
-
- return 0;
-}
-
-static int
-default_exit(struct task_struct *task, void *buf, struct pt_regs *regs)
-{
- DPRINT(("[%d] exit(%p)\n", task->pid, buf));
- return 0;
-}
-
-static pfm_buffer_fmt_t default_fmt={
- .fmt_name = "default_format",
- .fmt_uuid = PFM_DEFAULT_SMPL_UUID,
- .fmt_arg_size = sizeof(pfm_default_smpl_arg_t),
- .fmt_validate = default_validate,
- .fmt_getsize = default_get_size,
- .fmt_init = default_init,
- .fmt_handler = default_handler,
- .fmt_restart = default_restart,
- .fmt_restart_active = default_restart,
- .fmt_exit = default_exit,
-};
-
-#ifndef XEN
-static int __init
-pfm_default_smpl_init_module(void)
-{
- int ret;
-
- ret = pfm_register_buffer_fmt(&default_fmt);
- if (ret == 0) {
- printk("perfmon_default_smpl: %s v%u.%u registered\n",
- default_fmt.fmt_name,
- PFM_DEFAULT_SMPL_VERSION_MAJ,
- PFM_DEFAULT_SMPL_VERSION_MIN);
- } else {
- printk("perfmon_default_smpl: %s cannot register ret=%d\n",
- default_fmt.fmt_name,
- ret);
- }
-
- return ret;
-}
-#endif
-
-static void __exit
-pfm_default_smpl_cleanup_module(void)
-{
- int ret;
- ret = pfm_unregister_buffer_fmt(default_fmt.fmt_uuid);
-
- printk("perfmon_default_smpl: unregister %s=%d\n", default_fmt.fmt_name, ret);
-}
-
-#ifndef XEN
-module_init(pfm_default_smpl_init_module);
-module_exit(pfm_default_smpl_cleanup_module);
-#endif
-
diff --git a/xen/arch/ia64/linux-xen/perfmon_generic.h b/xen/arch/ia64/linux-xen/perfmon_generic.h
deleted file mode 100644
index 6748947804..0000000000
--- a/xen/arch/ia64/linux-xen/perfmon_generic.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * This file contains the generic PMU register description tables
- * and pmc checker used by perfmon.c.
- *
- * Copyright (C) 2002-2003 Hewlett Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-
-static pfm_reg_desc_t pfm_gen_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_gen_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd1 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd2 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd3 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_gen={
- .pmu_name = "Generic",
- .pmu_family = 0xff, /* any */
- .ovfl_val = (1UL << 32) - 1,
- .num_ibrs = 0, /* does not use */
- .num_dbrs = 0, /* does not use */
- .pmd_desc = pfm_gen_pmd_desc,
- .pmc_desc = pfm_gen_pmc_desc
-};
-
diff --git a/xen/arch/ia64/linux-xen/perfmon_itanium.h b/xen/arch/ia64/linux-xen/perfmon_itanium.h
deleted file mode 100644
index d1d508a0fb..0000000000
--- a/xen/arch/ia64/linux-xen/perfmon_itanium.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * This file contains the Itanium PMU register description tables
- * and pmc checker used by perfmon.c.
- *
- * Copyright (C) 2002-2003 Hewlett Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-static int pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-
-static pfm_reg_desc_t pfm_ita_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc4 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc8 */ { PFM_REG_CONFIG , 0, 0xf00000003ffffff8UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc9 */ { PFM_REG_CONFIG , 0, 0xf00000003ffffff8UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc10 */ { PFM_REG_MONITOR , 6, 0x0UL, -1UL, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0000000010000000UL, -1UL, NULL, pfm_ita_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc13 */ { PFM_REG_CONFIG , 0, 0x0003ffff00000001UL, -1UL, NULL, pfm_ita_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_ita_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
-/* pmd1 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
-/* pmd2 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
-/* pmd3 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
-/* pmd8 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd9 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd10 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd11 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd12 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd13 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd14 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd15 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd16 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd17 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static int
-pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- int ret;
- int is_loaded;
-
- /* sanity check */
- if (ctx == NULL) return -EINVAL;
-
- is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
-
- /*
- * we must clear the (instruction) debug registers if the pmc13.ta bit is cleared
- * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
- */
- if (cnum == 13 && is_loaded && ((*val & 0x1) == 0UL) && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc13.ta cleared, clearing ibr\n", cnum, *val));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(1, ctx, NULL, 0, regs);
- if (ret) return ret;
- }
-
- /*
- * we must clear the (data) debug registers if the pmc11.pt bit is cleared
- * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
- */
- if (cnum == 11 && is_loaded && ((*val >> 28)& 0x1) == 0 && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc11.pt cleared, clearing dbr\n", cnum, *val));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(0, ctx, NULL, 0, regs);
- if (ret) return ret;
- }
- return 0;
-}
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_ita={
- .pmu_name = "Itanium",
- .pmu_family = 0x7,
- .ovfl_val = (1UL << 32) - 1,
- .pmd_desc = pfm_ita_pmd_desc,
- .pmc_desc = pfm_ita_pmc_desc,
- .num_ibrs = 8,
- .num_dbrs = 8,
- .use_rr_dbregs = 1, /* debug registers are used for range restrictions */
-};
-
-
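
pfm_ita_pmc_check() above gates DBR clearing on bit 28 of pmc11, which is exactly the 0x0000000010000000UL (1UL << 28) default in the pmc11 descriptor row. The test, restated as a standalone helper for clarity (a sketch, not code from this tree):

    /* Bit 28 of pmc11 is the pt bit tested by pfm_ita_pmc_check(). */
    static inline int pmc11_pt_set(unsigned long pmc11)
    {
            return (int)((pmc11 >> 28) & 0x1);
    }

    /* DBRs are cleared only when this returns 0 and they are still unused. */
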
diff --git a/xen/arch/ia64/linux-xen/perfmon_mckinley.h b/xen/arch/ia64/linux-xen/perfmon_mckinley.h
deleted file mode 100644
index 9becccda28..0000000000
--- a/xen/arch/ia64/linux-xen/perfmon_mckinley.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * This file contains the McKinley PMU register description tables
- * and pmc checker used by perfmon.c.
- *
- * Copyright (C) 2002-2003 Hewlett Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-static int pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-
-static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc4 */ { PFM_REG_COUNTING, 6, 0x0000000000800000UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc8 */ { PFM_REG_CONFIG , 0, 0xffffffff3fffffffUL, 0xffffffff3ffffffbUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc9 */ { PFM_REG_CONFIG , 0, 0xffffffff3ffffffcUL, 0xffffffff3ffffffbUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc10 */ { PFM_REG_MONITOR , 4, 0x0UL, 0xffffUL, NULL, pfm_mck_pmc_check, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0UL, 0x30f01cf, NULL, pfm_mck_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, 0xffffUL, NULL, pfm_mck_pmc_check, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc13 */ { PFM_REG_CONFIG , 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc14 */ { PFM_REG_CONFIG , 0, 0x0db60db60db60db6UL, 0x2492UL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc15 */ { PFM_REG_CONFIG , 0, 0x00000000fffffff0UL, 0xfUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_mck_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
-/* pmd1 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
-/* pmd2 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
-/* pmd3 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
-/* pmd8 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd9 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd10 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd11 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd12 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd13 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd14 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd15 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd16 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd17 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-/*
- * PMC reserved fields must have their power-up values preserved
- */
-static int
-pfm_mck_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- unsigned long tmp1, tmp2, ival = *val;
-
- /* remove reserved areas from user value */
- tmp1 = ival & PMC_RSVD_MASK(cnum);
-
- /* get reserved fields values */
- tmp2 = PMC_DFL_VAL(cnum) & ~PMC_RSVD_MASK(cnum);
-
- *val = tmp1 | tmp2;
-
- DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
- cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
- return 0;
-}
-
-/*
- * task can be NULL if the context is unloaded
- */
-static int
-pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- int ret = 0, check_case1 = 0;
- unsigned long val8 = 0, val14 = 0, val13 = 0;
- int is_loaded;
-
- /* first preserve the reserved fields */
- pfm_mck_reserved(cnum, val, regs);
-
- /* sanity check */
- if (ctx == NULL) return -EINVAL;
-
- is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
-
- /*
- * we must clear the debug registers if pmc13 has a value which enables
- * memory pipeline event constraints. In this case we need to clear
- * the debug registers if they have not yet been accessed. This is
- * required to avoid picking up stale state.
- * PMC13 is "active" if:
- * one of the pmc13.cfg_dbrpXX fields is different from 0x3
- * AND
- * the corresponding pmc13.ena_dbrpXX bit is set.
- */
- DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, *val, ctx->ctx_fl_using_dbreg, is_loaded));
-
- if (cnum == 13 && is_loaded
- && (*val & 0x1e00000000000UL) && (*val & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc13 settings, clearing dbr\n", cnum, *val));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
- }
- /*
- * we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
- * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
- */
- if (cnum == 14 && is_loaded && ((*val & 0x2222UL) != 0x2222UL) && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc14 settings, clearing ibr\n", cnum, *val));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
-
- }
-
- switch(cnum) {
- case 4: *val |= 1UL << 23; /* force power enable bit */
- break;
- case 8: val8 = *val;
- val13 = ctx->ctx_pmcs[13];
- val14 = ctx->ctx_pmcs[14];
- check_case1 = 1;
- break;
- case 13: val8 = ctx->ctx_pmcs[8];
- val13 = *val;
- val14 = ctx->ctx_pmcs[14];
- check_case1 = 1;
- break;
- case 14: val8 = ctx->ctx_pmcs[8];
- val13 = ctx->ctx_pmcs[13];
- val14 = *val;
- check_case1 = 1;
- break;
- }
- /* check for an illegal configuration which can produce inconsistencies
- * in tagging i-side events in the L1D and L2 caches
- */
- if (check_case1) {
- ret = ((val13 >> 45) & 0xf) == 0
- && ((val8 & 0x1) == 0)
- && ((((val14>>1) & 0x3) == 0x2 || ((val14>>1) & 0x3) == 0x0)
- ||(((val14>>4) & 0x3) == 0x2 || ((val14>>4) & 0x3) == 0x0));
-
- if (ret) DPRINT((KERN_DEBUG "perfmon: failure check_case1\n"));
- }
-
- return ret ? -EINVAL : 0;
-}
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_mck={
- .pmu_name = "Itanium 2",
- .pmu_family = 0x1f,
- .flags = PFM_PMU_IRQ_RESEND,
- .ovfl_val = (1UL << 47) - 1,
- .pmd_desc = pfm_mck_pmd_desc,
- .pmc_desc = pfm_mck_pmc_desc,
- .num_ibrs = 8,
- .num_dbrs = 8,
- .use_rr_dbregs = 1 /* debug registers are used for range restrictions */
-};
-
-
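
pfm_mck_reserved() above merges a user-supplied PMC value with the power-up defaults: despite its name, PMC_RSVD_MASK() selects the writable bits, so the reserved bits always come from PMC_DFL_VAL(). A worked sketch with illustrative masks (not real McKinley values):

    unsigned long writable = 0x00ffUL;  /* bits the caller may set   */
    unsigned long dfl_val  = 0xab00UL;  /* power-up default value    */
    unsigned long user_val = 0x1234UL;  /* value written by the user */

    unsigned long merged = (user_val & writable) | (dfl_val & ~writable);
    /* merged == 0xab34: low byte from the user, high byte preserved */
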
diff --git a/xen/arch/ia64/linux-xen/perfmon_montecito.h b/xen/arch/ia64/linux-xen/perfmon_montecito.h
deleted file mode 100644
index cd06ac6a68..0000000000
--- a/xen/arch/ia64/linux-xen/perfmon_montecito.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * This file contains the Montecito PMU register description tables
- * and pmc checker used by perfmon.c.
- *
- * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
- * Contributed by Stephane Eranian <eranian@hpl.hp.com>
- */
-static int pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-
-#define RDEP_MONT_ETB (RDEP(38)|RDEP(39)|RDEP(48)|RDEP(49)|RDEP(50)|RDEP(51)|RDEP(52)|RDEP(53)|RDEP(54)|\
- RDEP(55)|RDEP(56)|RDEP(57)|RDEP(58)|RDEP(59)|RDEP(60)|RDEP(61)|RDEP(62)|RDEP(63))
-#define RDEP_MONT_DEAR (RDEP(32)|RDEP(33)|RDEP(36))
-#define RDEP_MONT_IEAR (RDEP(34)|RDEP(35))
-
-static pfm_reg_desc_t pfm_mont_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc4 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(4),0, 0, 0}, {0,0, 0, 0}},
-/* pmc5 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(5),0, 0, 0}, {0,0, 0, 0}},
-/* pmc6 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(6),0, 0, 0}, {0,0, 0, 0}},
-/* pmc7 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(7),0, 0, 0}, {0,0, 0, 0}},
-/* pmc8 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(8),0, 0, 0}, {0,0, 0, 0}},
-/* pmc9 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(9),0, 0, 0}, {0,0, 0, 0}},
-/* pmc10 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(10),0, 0, 0}, {0,0, 0, 0}},
-/* pmc11 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(11),0, 0, 0}, {0,0, 0, 0}},
-/* pmc12 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(12),0, 0, 0}, {0,0, 0, 0}},
-/* pmc13 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(13),0, 0, 0}, {0,0, 0, 0}},
-/* pmc14 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(14),0, 0, 0}, {0,0, 0, 0}},
-/* pmc15 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(15),0, 0, 0}, {0,0, 0, 0}},
-/* pmc16 */ { PFM_REG_NOTIMPL, },
-/* pmc17 */ { PFM_REG_NOTIMPL, },
-/* pmc18 */ { PFM_REG_NOTIMPL, },
-/* pmc19 */ { PFM_REG_NOTIMPL, },
-/* pmc20 */ { PFM_REG_NOTIMPL, },
-/* pmc21 */ { PFM_REG_NOTIMPL, },
-/* pmc22 */ { PFM_REG_NOTIMPL, },
-/* pmc23 */ { PFM_REG_NOTIMPL, },
-/* pmc24 */ { PFM_REG_NOTIMPL, },
-/* pmc25 */ { PFM_REG_NOTIMPL, },
-/* pmc26 */ { PFM_REG_NOTIMPL, },
-/* pmc27 */ { PFM_REG_NOTIMPL, },
-/* pmc28 */ { PFM_REG_NOTIMPL, },
-/* pmc29 */ { PFM_REG_NOTIMPL, },
-/* pmc30 */ { PFM_REG_NOTIMPL, },
-/* pmc31 */ { PFM_REG_NOTIMPL, },
-/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
-/* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
-/* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
-/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
- { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_mont_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_NOTIMPL, },
-/* pmd1 */ { PFM_REG_NOTIMPL, },
-/* pmd2 */ { PFM_REG_NOTIMPL, },
-/* pmd3 */ { PFM_REG_NOTIMPL, },
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(4),0, 0, 0}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(5),0, 0, 0}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(6),0, 0, 0}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(7),0, 0, 0}},
-/* pmd8 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(8),0, 0, 0}},
-/* pmd9 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(9),0, 0, 0}},
-/* pmd10 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(10),0, 0, 0}},
-/* pmd11 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(11),0, 0, 0}},
-/* pmd12 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(12),0, 0, 0}},
-/* pmd13 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(13),0, 0, 0}},
-/* pmd14 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(14),0, 0, 0}},
-/* pmd15 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(15),0, 0, 0}},
-/* pmd16 */ { PFM_REG_NOTIMPL, },
-/* pmd17 */ { PFM_REG_NOTIMPL, },
-/* pmd18 */ { PFM_REG_NOTIMPL, },
-/* pmd19 */ { PFM_REG_NOTIMPL, },
-/* pmd20 */ { PFM_REG_NOTIMPL, },
-/* pmd21 */ { PFM_REG_NOTIMPL, },
-/* pmd22 */ { PFM_REG_NOTIMPL, },
-/* pmd23 */ { PFM_REG_NOTIMPL, },
-/* pmd24 */ { PFM_REG_NOTIMPL, },
-/* pmd25 */ { PFM_REG_NOTIMPL, },
-/* pmd26 */ { PFM_REG_NOTIMPL, },
-/* pmd27 */ { PFM_REG_NOTIMPL, },
-/* pmd28 */ { PFM_REG_NOTIMPL, },
-/* pmd29 */ { PFM_REG_NOTIMPL, },
-/* pmd30 */ { PFM_REG_NOTIMPL, },
-/* pmd31 */ { PFM_REG_NOTIMPL, },
-/* pmd32 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(33)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
-/* pmd33 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
-/* pmd34 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(35),0, 0, 0}, {RDEP(37),0, 0, 0}},
-/* pmd35 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(34),0, 0, 0}, {RDEP(37),0, 0, 0}},
-/* pmd36 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(33),0, 0, 0}, {RDEP(40),0, 0, 0}},
-/* pmd37 */ { PFM_REG_NOTIMPL, },
-/* pmd38 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd39 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd40 */ { PFM_REG_NOTIMPL, },
-/* pmd41 */ { PFM_REG_NOTIMPL, },
-/* pmd42 */ { PFM_REG_NOTIMPL, },
-/* pmd43 */ { PFM_REG_NOTIMPL, },
-/* pmd44 */ { PFM_REG_NOTIMPL, },
-/* pmd45 */ { PFM_REG_NOTIMPL, },
-/* pmd46 */ { PFM_REG_NOTIMPL, },
-/* pmd47 */ { PFM_REG_NOTIMPL, },
-/* pmd48 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd49 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd50 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd51 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd52 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd53 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd54 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd55 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd56 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd57 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd58 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd59 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd60 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd61 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd62 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd63 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
- { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-/*
- * PMC reserved fields must have their power-up values preserved
- */
-static int
-pfm_mont_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- unsigned long tmp1, tmp2, ival = *val;
-
- /* remove reserved areas from user value */
- tmp1 = ival & PMC_RSVD_MASK(cnum);
-
- /* get reserved fields values */
- tmp2 = PMC_DFL_VAL(cnum) & ~PMC_RSVD_MASK(cnum);
-
- *val = tmp1 | tmp2;
-
- DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
- cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
- return 0;
-}
-
-/*
- * task can be NULL if the context is unloaded
- */
-static int
-pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- int ret = 0;
- unsigned long val32 = 0, val38 = 0, val41 = 0;
- unsigned long tmpval;
- int check_case1 = 0;
- int is_loaded;
-
- /* first preserve the reserved fields */
- pfm_mont_reserved(cnum, val, regs);
-
- tmpval = *val;
-
- /* sanity check */
- if (ctx == NULL) return -EINVAL;
-
- is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
-
- /*
- * we must clear the debug registers if pmc41 has a value which enables
- * memory pipeline event constraints. In this case we need to clear
- * the debug registers if they have not yet been accessed. This is
- * required to avoid picking up stale state.
- * PMC41 is "active" if:
- * one of the pmc41.cfg_dtagXX fields is different from 0x3
- * AND
- * the corresponding pmc41.en_dbrpXX bit is set.
- * AND
- * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
- */
- DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
-
- if (cnum == 41 && is_loaded
- && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
- }
- /*
- * we must clear the (instruction) debug registers if:
- * pmc38.ig_ibrpX is 0 (enabled)
- * AND
- * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
- */
- if (cnum == 38 && is_loaded && ((tmpval & 0x492UL) != 0x492UL) && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc38=0x%lx has active pmc38 settings, clearing ibr\n", tmpval));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
-
- }
- switch(cnum) {
- case 32: val32 = *val;
- val38 = ctx->ctx_pmcs[38];
- val41 = ctx->ctx_pmcs[41];
- check_case1 = 1;
- break;
- case 38: val38 = *val;
- val32 = ctx->ctx_pmcs[32];
- val41 = ctx->ctx_pmcs[41];
- check_case1 = 1;
- break;
- case 41: val41 = *val;
- val32 = ctx->ctx_pmcs[32];
- val38 = ctx->ctx_pmcs[38];
- check_case1 = 1;
- break;
- }
- /* check for an illegal configuration which can produce inconsistencies
- * in tagging i-side events in the L1D and L2 caches
- */
- if (check_case1) {
- ret = (((val41 >> 45) & 0xf) == 0 && ((val32>>57) & 0x1) == 0)
- && ((((val38>>1) & 0x3) == 0x2 || ((val38>>1) & 0x3) == 0)
- || (((val38>>4) & 0x3) == 0x2 || ((val38>>4) & 0x3) == 0));
- if (ret) {
- DPRINT(("invalid config pmc38=0x%lx pmc41=0x%lx pmc32=0x%lx\n", val38, val41, val32));
- return -EINVAL;
- }
- }
- *val = tmpval;
- return 0;
-}
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_mont={
- .pmu_name = "Montecito",
- .pmu_family = 0x20,
- .flags = PFM_PMU_IRQ_RESEND,
- .ovfl_val = (1UL << 47) - 1,
- .pmd_desc = pfm_mont_pmd_desc,
- .pmc_desc = pfm_mont_pmc_desc,
- .num_ibrs = 8,
- .num_dbrs = 8,
- .use_rr_dbregs = 1 /* debug registers are used for range restrictions */
-};
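
The check_case1 logic in pfm_mont_pmc_check() above rejects pmc32/pmc38/pmc41 combinations that would tag i-side events inconsistently in the L1D and L2 caches. Restated as a standalone predicate (field positions copied from the check; the names are taken from the surrounding comments):

    static int mont_iside_tagging_invalid(unsigned long pmc32,
                                          unsigned long pmc38,
                                          unsigned long pmc41)
    {
            int no_dtag   = ((pmc41 >> 45) & 0xf) == 0;
            int b57_clear = ((pmc32 >> 57) & 0x1) == 0;
            int ibrp0_bad = ((pmc38 >> 1) & 0x3) == 0x2 || ((pmc38 >> 1) & 0x3) == 0;
            int ibrp1_bad = ((pmc38 >> 4) & 0x3) == 0x2 || ((pmc38 >> 4) & 0x3) == 0;

            return no_dtag && b57_clear && (ibrp0_bad || ibrp1_bad);
    }
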
diff --git a/xen/arch/ia64/linux-xen/process-linux-xen.c b/xen/arch/ia64/linux-xen/process-linux-xen.c
deleted file mode 100644
index f79109ce19..0000000000
--- a/xen/arch/ia64/linux-xen/process-linux-xen.c
+++ /dev/null
@@ -1,891 +0,0 @@
-/*
- * Architecture-specific setup.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
- */
-#ifdef XEN
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-#include <xen/types.h>
-#include <xen/lib.h>
-#include <xen/symbols.h>
-#include <xen/smp.h>
-#include <xen/sched.h>
-#include <asm/elf.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/unwind.h>
-#include <asm/sal.h>
-#else
-#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
-#include <linux/config.h>
-
-#include <linux/cpu.h>
-#include <linux/pm.h>
-#include <linux/elf.h>
-#include <linux/errno.h>
-#include <linux/kallsyms.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/personality.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/thread_info.h>
-#include <linux/unistd.h>
-#include <linux/efi.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/kprobes.h>
-
-#include <asm/cpu.h>
-#include <asm/delay.h>
-#include <asm/elf.h>
-#include <asm/ia32.h>
-#include <asm/irq.h>
-#include <asm/pgalloc.h>
-#include <asm/processor.h>
-#include <asm/sal.h>
-#include <asm/tlbflush.h>
-#include <asm/uaccess.h>
-#include <asm/unwind.h>
-#include <asm/user.h>
-
-#include "entry.h"
-
-#ifdef CONFIG_PERFMON
-# include <asm/perfmon.h>
-#endif
-
-#include "sigframe.h"
-
-void (*ia64_mark_idle)(int);
-static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-#endif
-
-void
-ia64_do_show_stack (struct unw_frame_info *info, void *arg)
-{
- unsigned long ip, sp, bsp;
- char buf[128]; /* don't make it so big that it overflows the stack! */
-
- printk("\nCall Trace:\n");
- do {
- unw_get_ip(info, &ip);
- if (ip == 0)
- break;
-
- unw_get_sp(info, &sp);
- unw_get_bsp(info, &bsp);
- snprintf(buf, sizeof(buf),
- " [<%016lx>] %%s\n"
- " sp=%016lx bsp=%016lx\n",
- ip, sp, bsp);
- print_symbol(buf, ip);
- } while (unw_unwind(info) >= 0);
-}
-
-void
-show_stack (struct task_struct *task, unsigned long *sp)
-{
- if (!task)
- unw_init_running(ia64_do_show_stack, NULL);
- else {
- struct unw_frame_info info;
-
- unw_init_from_blocked_task(&info, task);
- ia64_do_show_stack(&info, NULL);
- }
-}
-
-void
-dump_stack (void)
-{
- show_stack(NULL, NULL);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
-#ifdef XEN
-void
-show_registers(struct pt_regs *regs)
-#else
-void
-show_regs (struct pt_regs *regs)
-#endif
-{
- unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
-
-#ifndef XEN
- print_modules();
- printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
- printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
- regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
-#else
- struct vcpu* vcpu = current;
- if (vcpu != NULL) {
- struct domain* d = vcpu->domain;
- printk("d 0x%p domid %d\n", d, d->domain_id);
- printk("vcpu 0x%p vcpu %d\n",
- vcpu, vcpu->vcpu_id);
- }
- printk("\nCPU %d\n", smp_processor_id());
- printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n",
- regs->cr_ipsr, regs->cr_ifs, ip);
-#endif
- print_symbol("ip is at %s\n", ip);
- printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
- regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
- printk("rnat: %016lx bsps: %016lx pr : %016lx\n",
- regs->ar_rnat, regs->ar_bspstore, regs->pr);
- printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
- regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
- printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
- printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7);
- printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
- regs->f6.u.bits[1], regs->f6.u.bits[0],
- regs->f7.u.bits[1], regs->f7.u.bits[0]);
- printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
- regs->f8.u.bits[1], regs->f8.u.bits[0],
- regs->f9.u.bits[1], regs->f9.u.bits[0]);
- printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
- regs->f10.u.bits[1], regs->f10.u.bits[0],
- regs->f11.u.bits[1], regs->f11.u.bits[0]);
-
- printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3);
- printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
- printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
- printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
- printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
- printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
- printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
- printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
- printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);
-
-#ifndef XEN
- if (user_mode(regs)) {
- /* print the stacked registers */
- unsigned long val, *bsp, ndirty;
- int i, sof, is_nat = 0;
-
- sof = regs->cr_ifs & 0x7f; /* size of frame */
- ndirty = (regs->loadrs >> 19);
- bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
- for (i = 0; i < sof; ++i) {
- get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
- printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
- ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
- }
- } else
-#endif
- show_stack(NULL, NULL);
-}
-
-#ifndef XEN
-void
-do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
-{
- if (fsys_mode(current, &scr->pt)) {
- /* defer signal-handling etc. until we return to privilege-level 0. */
- if (!ia64_psr(&scr->pt)->lp)
- ia64_psr(&scr->pt)->lp = 1;
- return;
- }
-
-#ifdef CONFIG_PERFMON
- if (current->thread.pfm_needs_checking)
- pfm_handle_work();
-#endif
-
- /* deal with pending signal delivery */
- if (test_thread_flag(TIF_SIGPENDING))
- ia64_do_signal(oldset, scr, in_syscall);
-}
-
-static int pal_halt = 1;
-static int can_do_pal_halt = 1;
-
-static int __init nohalt_setup(char * str)
-{
- pal_halt = can_do_pal_halt = 0;
- return 1;
-}
-__setup("nohalt", nohalt_setup);
-
-void
-update_pal_halt_status(int status)
-{
- can_do_pal_halt = pal_halt && status;
-}
-
-/*
- * We use this if we don't have any better idle routine.
- */
-void
-default_idle (void)
-{
- local_irq_enable();
- while (!need_resched())
- if (can_do_pal_halt)
- safe_halt();
- else
- cpu_relax();
-}
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-/* We don't actually take the CPU down, just spin without interrupts. */
-#ifndef XEN
-static inline void play_dead(void)
-#else
-void play_dead(void)
-#endif
-{
- extern void ia64_cpu_local_tick (void);
- unsigned int this_cpu = smp_processor_id();
-
- /* Ack it */
- __get_cpu_var(cpu_state) = CPU_DEAD;
-
- max_xtp();
- local_irq_disable();
- ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
- /*
- * The above is a point of no return; the processor is
- * expected to be in the SAL loop now.
- */
- BUG();
-}
-#else
-#ifndef XEN
-static inline void play_dead(void)
-#else
-void play_dead(void)
-#endif
-{
- BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-#ifndef XEN
-void cpu_idle_wait(void)
-{
- unsigned int cpu, this_cpu = get_cpu();
- cpumask_t map;
-
- set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
- put_cpu();
-
- cpus_clear(map);
- for_each_online_cpu(cpu) {
- per_cpu(cpu_idle_state, cpu) = 1;
- cpu_set(cpu, map);
- }
-
- __get_cpu_var(cpu_idle_state) = 0;
-
- wmb();
- do {
- ssleep(1);
- for_each_online_cpu(cpu) {
- if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
- cpu_clear(cpu, map);
- }
- cpus_and(map, map, cpu_online_map);
- } while (!cpus_empty(map));
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-void __attribute__((noreturn))
-cpu_idle (void)
-{
- void (*mark_idle)(int) = ia64_mark_idle;
-
- /* endless idle loop with no priority at all */
- while (1) {
-#ifdef CONFIG_SMP
- if (!need_resched())
- min_xtp();
-#endif
- while (!need_resched()) {
- void (*idle)(void);
-
- if (__get_cpu_var(cpu_idle_state))
- __get_cpu_var(cpu_idle_state) = 0;
-
- rmb();
- if (mark_idle)
- (*mark_idle)(1);
-
- idle = pm_idle;
- if (!idle)
- idle = default_idle;
- (*idle)();
- }
-
- if (mark_idle)
- (*mark_idle)(0);
-
-#ifdef CONFIG_SMP
- normal_xtp();
-#endif
- schedule();
- check_pgt_cache();
- if (cpu_is_offline(smp_processor_id()))
- play_dead();
- }
-}
-
-void
-ia64_save_extra (struct task_struct *task)
-{
-#ifdef CONFIG_PERFMON
- unsigned long info;
-#endif
-
- if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
- ia64_save_debug_regs(&task->thread.dbr[0]);
-
-#ifdef CONFIG_PERFMON
- if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
- pfm_save_regs(task);
-
- info = __get_cpu_var(pfm_syst_info);
- if (info & PFM_CPUINFO_SYST_WIDE)
- pfm_syst_wide_update_task(task, info, 0);
-#endif
-
-#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(task)))
- ia32_save_state(task);
-#endif
-}
-
-void
-ia64_load_extra (struct task_struct *task)
-{
-#ifdef CONFIG_PERFMON
- unsigned long info;
-#endif
-
- if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
- ia64_load_debug_regs(&task->thread.dbr[0]);
-
-#ifdef CONFIG_PERFMON
- if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
- pfm_load_regs(task);
-
- info = __get_cpu_var(pfm_syst_info);
- if (info & PFM_CPUINFO_SYST_WIDE)
- pfm_syst_wide_update_task(task, info, 1);
-#endif
-
-#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(task)))
- ia32_load_state(task);
-#endif
-}
-
-/*
- * Copy the state of an ia-64 thread.
- *
- * We get here through the following call chain:
- *
- * from user-level: from kernel:
- *
- * <clone syscall> <some kernel call frames>
- * sys_clone :
- * do_fork do_fork
- * copy_thread copy_thread
- *
- * This means that the stack layout is as follows:
- *
- * +---------------------+ (highest addr)
- * | struct pt_regs |
- * +---------------------+
- * | struct switch_stack |
- * +---------------------+
- * | |
- * | memory stack |
- * | | <-- sp (lowest addr)
- * +---------------------+
- *
- * Observe that we copy the unat values that are in pt_regs and switch_stack. Spilling an
- * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
- * with N=(X & 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY if the
- * pt_regs structure in the parent is congruent to that of the child, modulo 512. Since
- * the stack is page aligned and the page size is at least 4KB, this is always the case,
- * so there is nothing to worry about.
- */
-int
-copy_thread (int nr, unsigned long clone_flags,
- unsigned long user_stack_base, unsigned long user_stack_size,
- struct task_struct *p, struct pt_regs *regs)
-{
- extern char ia64_ret_from_clone, ia32_ret_from_clone;
- struct switch_stack *child_stack, *stack;
- unsigned long rbs, child_rbs, rbs_size;
- struct pt_regs *child_ptregs;
- int retval = 0;
-
-#ifdef CONFIG_SMP
- /*
- * For SMP idle threads, fork_by_hand() calls do_fork with
- * NULL regs.
- */
- if (!regs)
- return 0;
-#endif
-
- stack = ((struct switch_stack *) regs) - 1;
-
- child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
- child_stack = (struct switch_stack *) child_ptregs - 1;
-
- /* copy parent's switch_stack & pt_regs to child: */
- memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
-
- rbs = (unsigned long) current + IA64_RBS_OFFSET;
- child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
- rbs_size = stack->ar_bspstore - rbs;
-
- /* copy the parent's register backing store to the child: */
- memcpy((void *) child_rbs, (void *) rbs, rbs_size);
-
- if (likely(user_mode(child_ptregs))) {
- if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
- child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
- if (user_stack_base) {
- child_ptregs->r12 = user_stack_base + user_stack_size - 16;
- child_ptregs->ar_bspstore = user_stack_base;
- child_ptregs->ar_rnat = 0;
- child_ptregs->loadrs = 0;
- }
- } else {
- /*
- * Note: we simply preserve the relative position of
- * the stack pointer here. There is no need to
- * allocate a scratch area here, since that will have
- * been taken care of by the caller of sys_clone()
- * already.
- */
- child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
- child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */
- }
- child_stack->ar_bspstore = child_rbs + rbs_size;
- if (IS_IA32_PROCESS(regs))
- child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
- else
- child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
-
- /* copy parts of thread_struct: */
- p->thread.ksp = (unsigned long) child_stack - 16;
-
- /* Stop some PSR bits from being inherited.
- * The psr.up/psr.pp bits must be cleared on fork but inherited on execve();
- * therefore we must specify them explicitly here and not include them in
- * IA64_PSR_BITS_TO_CLEAR.
- */
- child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
- & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
-
- /*
- * NOTE: The calling convention considers all floating point
- * registers in the high partition (fph) to be scratch. Since
- * the only way to get to this point is through a system call,
- * we know that the values in fph are all dead. Hence, there
- * is no need to inherit the fph state from the parent to the
- * child and all we have to do is to make sure that
- * IA64_THREAD_FPH_VALID is cleared in the child.
- *
- * XXX We could push this optimization a bit further by
- * clearing IA64_THREAD_FPH_VALID on ANY system call.
- * However, it's not clear this is worth doing. Also, it
- * would be a slight deviation from the normal Linux system
- * call behavior where scratch registers are preserved across
- * system calls (unless used by the system call itself).
- */
-# define THREAD_FLAGS_TO_CLEAR (IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
- | IA64_THREAD_PM_VALID)
-# define THREAD_FLAGS_TO_SET 0
- p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
- | THREAD_FLAGS_TO_SET);
- ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
-#ifdef CONFIG_IA32_SUPPORT
- /*
- * If we're cloning an IA32 task then save the IA32 extra
- * state from the current task to the new task
- */
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
- ia32_save_state(p);
- if (clone_flags & CLONE_SETTLS)
- retval = ia32_clone_tls(p, child_ptregs);
-
- /* Copy partially mapped page list */
- if (!retval)
- retval = ia32_copy_partial_page_list(p, clone_flags);
- }
-#endif
-
-#ifdef CONFIG_PERFMON
- if (current->thread.pfm_context)
- pfm_inherit(p, child_ptregs);
-#endif
- return retval;
-}
-
-#endif /* !XEN */
-
-static void
-do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
-{
- unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm;
- elf_greg_t *dst = arg;
- struct pt_regs *pt;
- char nat;
- int i;
-
- memset(dst, 0, sizeof(elf_gregset_t)); /* don't leak any kernel bits to user-level */
-
- if (unw_unwind_to_user(info) < 0)
- return;
-
- unw_get_sp(info, &sp);
- pt = (struct pt_regs *) (sp + 16);
-
-#ifndef XEN
- /* FIXME: Is this needed by XEN when it makes its crash notes
- * during kdump? */
- urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);
-
- if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
- return;
-
- ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
- &ar_rnat);
-#else
-
- ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((u64 *) urbs_end),
- (long *)&ar_rnat);
-#endif
-
- /*
- * coredump format:
- * r0-r31
- * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
- * predicate registers (p0-p63)
- * b0-b7
- * ip cfm user-mask
- * ar.rsc ar.bsp ar.bspstore ar.rnat
- * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
- */
-
- /* r0 is zero */
- for (i = 1, mask = (1UL << i); i < 32; ++i) {
- unw_get_gr(info, i, &dst[i], &nat);
- if (nat)
- nat_bits |= mask;
- mask <<= 1;
- }
- dst[32] = nat_bits;
- unw_get_pr(info, &dst[33]);
-
- for (i = 0; i < 8; ++i)
- unw_get_br(info, i, &dst[34 + i]);
-
- unw_get_rp(info, &ip);
- dst[42] = ip + ia64_psr(pt)->ri;
- dst[43] = cfm;
- dst[44] = pt->cr_ipsr & IA64_PSR_UM;
-
- unw_get_ar(info, UNW_AR_RSC, &dst[45]);
- /*
- * For bsp and bspstore, unw_get_ar() would return the kernel
- * addresses, but we need the user-level addresses instead:
- */
- dst[46] = urbs_end; /* note: by convention PT_AR_BSP points to the end of the urbs! */
- dst[47] = pt->ar_bspstore;
- dst[48] = ar_rnat;
- unw_get_ar(info, UNW_AR_CCV, &dst[49]);
- unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
- unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
- dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
- unw_get_ar(info, UNW_AR_LC, &dst[53]);
- unw_get_ar(info, UNW_AR_EC, &dst[54]);
- unw_get_ar(info, UNW_AR_CSD, &dst[55]);
- unw_get_ar(info, UNW_AR_SSD, &dst[56]);
-}
-
-#ifndef XEN
-
-void
-do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
-{
- elf_fpreg_t *dst = arg;
- int i;
-
- memset(dst, 0, sizeof(elf_fpregset_t)); /* don't leak any "random" bits */
-
- if (unw_unwind_to_user(info) < 0)
- return;
-
- /* f0 is 0.0, f1 is 1.0 */
-
- for (i = 2; i < 32; ++i)
- unw_get_fr(info, i, dst + i);
-
- ia64_flush_fph(task);
- if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
- memcpy(dst + 32, task->thread.fph, 96*16);
-}
-
-#endif /* !XEN */
-
-void
-do_copy_regs (struct unw_frame_info *info, void *arg)
-{
- do_copy_task_regs(current, info, arg);
-}
-
-#ifndef XEN
-
-void
-do_dump_fpu (struct unw_frame_info *info, void *arg)
-{
- do_dump_task_fpu(current, info, arg);
-}
-
-int
-dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
-{
- struct unw_frame_info tcore_info;
-
- if (current == task) {
- unw_init_running(do_copy_regs, regs);
- } else {
- memset(&tcore_info, 0, sizeof(tcore_info));
- unw_init_from_blocked_task(&tcore_info, task);
- do_copy_task_regs(task, &tcore_info, regs);
- }
- return 1;
-}
-
-#endif /* !XEN */
-
-void
-ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
-{
- unw_init_running(do_copy_regs, dst);
-}
-
-#ifndef XEN
-
-int
-dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
-{
- struct unw_frame_info tcore_info;
-
- if (current == task) {
- unw_init_running(do_dump_fpu, dst);
- } else {
- memset(&tcore_info, 0, sizeof(tcore_info));
- unw_init_from_blocked_task(&tcore_info, task);
- do_dump_task_fpu(task, &tcore_info, dst);
- }
- return 1;
-}
-
-int
-dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
-{
- unw_init_running(do_dump_fpu, dst);
- return 1; /* f0-f31 are always valid so we always return 1 */
-}
-
-long
-sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp,
- struct pt_regs *regs)
-{
- char *fname;
- int error;
-
- fname = getname(filename);
- error = PTR_ERR(fname);
- if (IS_ERR(fname))
- goto out;
- error = do_execve(fname, argv, envp, regs);
- putname(fname);
-out:
- return error;
-}
-
-pid_t
-kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
-{
- extern void start_kernel_thread (void);
- unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
- struct {
- struct switch_stack sw;
- struct pt_regs pt;
- } regs;
-
- memset(&regs, 0, sizeof(regs));
- regs.pt.cr_iip = helper_fptr[0]; /* set entry point (IP) */
- regs.pt.r1 = helper_fptr[1]; /* set GP */
- regs.pt.r9 = (unsigned long) fn; /* 1st argument */
- regs.pt.r11 = (unsigned long) arg; /* 2nd argument */
- /* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read. */
- regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
- regs.pt.cr_ifs = 1UL << 63; /* mark as valid, empty frame */
- regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
- regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
- regs.sw.pr = (1 << PRED_KERNEL_STACK);
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
-/* This gets called from kernel_thread() via ia64_invoke_thread_helper(). */
-int
-kernel_thread_helper (int (*fn)(void *), void *arg)
-{
-#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
- /* A kernel thread is always a 64-bit process. */
- current->thread.map_base = DEFAULT_MAP_BASE;
- current->thread.task_size = DEFAULT_TASK_SIZE;
- ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
- ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
- }
-#endif
- return (*fn)(arg);
-}
-
-/*
- * Flush thread state. This is called when a thread does an execve().
- */
-void
-flush_thread (void)
-{
- /*
- * Remove function-return probe instances associated with this task
- * and put them back on the free list. Do not insert an exit probe for
- * this function, it will be disabled by kprobe_flush_task if you do.
- */
- kprobe_flush_task(current);
-
- /* drop floating-point and debug-register state if it exists: */
- current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
- ia64_drop_fpu(current);
- if (IS_IA32_PROCESS(ia64_task_regs(current)))
- ia32_drop_partial_page_list(current);
-}
-
-/*
- * Clean up state associated with current thread. This is called when
- * the thread calls exit().
- */
-void
-exit_thread (void)
-{
-
- /*
- * Remove function-return probe instances associated with this task
- * and put them back on the free list. Do not insert an exit probe for
- * this function, it will be disabled by kprobe_flush_task if you do.
- */
- kprobe_flush_task(current);
-
- ia64_drop_fpu(current);
-#ifdef CONFIG_PERFMON
- /* if needed, stop monitoring and flush state to perfmon context */
- if (current->thread.pfm_context)
- pfm_exit_thread(current);
-
- /* free debug register resources */
- if (current->thread.flags & IA64_THREAD_DBG_VALID)
- pfm_release_debug_registers(current);
-#endif
- if (IS_IA32_PROCESS(ia64_task_regs(current)))
- ia32_drop_partial_page_list(current);
-}
-
-unsigned long
-get_wchan (struct task_struct *p)
-{
- struct unw_frame_info info;
- unsigned long ip;
- int count = 0;
-
- /*
- * Note: p may not be a blocked task (it could be current or
- * another process running on some other CPU). Rather than
- * trying to determine if p is really blocked, we just assume
- * it's blocked and rely on the unwind routines to fail
- * gracefully if the process wasn't really blocked after all.
- * --davidm 99/12/15
- */
- unw_init_from_blocked_task(&info, p);
- do {
- if (unw_unwind(&info) < 0)
- return 0;
- unw_get_ip(&info, &ip);
- if (!in_sched_functions(ip))
- return ip;
- } while (count++ < 16);
- return 0;
-}
-#endif // !XEN
-
-void
-cpu_halt (void)
-{
- pal_power_mgmt_info_u_t power_info[8];
- unsigned long min_power;
- int i, min_power_state;
-
- if (ia64_pal_halt_info(power_info) != 0)
- return;
-
- min_power_state = 0;
- min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
- for (i = 1; i < 8; ++i)
- if (power_info[i].pal_power_mgmt_info_s.im
- && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
- min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
- min_power_state = i;
- }
-
- while (1)
- ia64_pal_halt(min_power_state);
-}
-
-#ifndef XEN
-void
-machine_restart (char *restart_cmd)
-{
- (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
-}
-
-void
-machine_halt (void)
-{
- cpu_halt();
-}
-
-void
-machine_power_off (void)
-{
- if (pm_power_off)
- pm_power_off();
- machine_halt();
-}
-#endif // !XEN
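
The copy_thread() comment above leans on the ar.unat spill rule: spilling a register to address X records its NaT bit at unat bit N = (X & 0x1ff)/8, so two spill areas congruent modulo 512 use identical unat bit positions. A small sketch of the index calculation:

    /* unat bit index for a spill to address x: N = (x & 0x1ff) / 8 */
    static inline unsigned int unat_bit_for_spill(unsigned long x)
    {
            return (unsigned int)((x & 0x1ffUL) >> 3);
    }

    /*
     * Addresses 512 bytes apart share an index, e.g.
     * unat_bit_for_spill(0x1008) == unat_bit_for_spill(0x1208) == 1,
     * which is why copying pt_regs/switch_stack wholesale preserves the
     * NaT bits when parent and child are congruent modulo 512.
     */
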
diff --git a/xen/arch/ia64/linux-xen/sal.c b/xen/arch/ia64/linux-xen/sal.c
deleted file mode 100644
index 564dfa4730..0000000000
--- a/xen/arch/ia64/linux-xen/sal.c
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * System Abstraction Layer (SAL) interface routines.
- *
- * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- */
-#include <linux/config.h>
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-
-#ifdef XEN
-#include <linux/smp.h>
-#include <asm/hw_irq.h>
-#include <xen/lib.h>
-#endif
-#include <asm/delay.h>
-#include <asm/page.h>
-#include <asm/sal.h>
-#include <asm/pal.h>
-
- __cacheline_aligned DEFINE_SPINLOCK(sal_lock);
-unsigned long sal_platform_features;
-
-unsigned short sal_revision;
-unsigned short sal_version;
-
-#define SAL_MAJOR(x) ((x) >> 8)
-#define SAL_MINOR(x) ((x) & 0xff)
-
-static struct {
- void *addr; /* function entry point */
- void *gpval; /* gp value to use */
-} pdesc;
-
-static long
-default_handler (void)
-{
- return -1;
-}
-
-ia64_sal_handler ia64_sal = (ia64_sal_handler) default_handler;
-ia64_sal_desc_ptc_t *ia64_ptc_domain_info;
-
-const char *
-ia64_sal_strerror (long status)
-{
- const char *str;
- switch (status) {
- case 0: str = "Call completed without error"; break;
- case 1: str = "Effect a warm boot of the system to complete "
- "the update"; break;
- case -1: str = "Not implemented"; break;
- case -2: str = "Invalid argument"; break;
- case -3: str = "Call completed with error"; break;
- case -4: str = "Virtual address not registered"; break;
- case -5: str = "No information available"; break;
- case -6: str = "Insufficient space to add the entry"; break;
- case -7: str = "Invalid entry_addr value"; break;
- case -8: str = "Invalid interrupt vector"; break;
- case -9: str = "Requested memory not available"; break;
- case -10: str = "Unable to write to the NVM device"; break;
- case -11: str = "Invalid partition type specified"; break;
- case -12: str = "Invalid NVM_Object id specified"; break;
- case -13: str = "NVM_Object already has the maximum number "
- "of partitions"; break;
- case -14: str = "Insufficient space in partition for the "
- "requested write sub-function"; break;
- case -15: str = "Insufficient data buffer space for the "
- "requested read record sub-function"; break;
- case -16: str = "Scratch buffer required for the write/delete "
- "sub-function"; break;
- case -17: str = "Insufficient space in the NVM_Object for the "
- "requested create sub-function"; break;
- case -18: str = "Invalid value specified in the partition_rec "
- "argument"; break;
- case -19: str = "Record oriented I/O not supported for this "
- "partition"; break;
- case -20: str = "Bad format of record to be written or "
- "required keyword variable not "
- "specified"; break;
- default: str = "Unknown SAL status code"; break;
- }
- return str;
-}
-
-void __init
-ia64_sal_handler_init (void *entry_point, void *gpval)
-{
- /* fill in the SAL procedure descriptor and point ia64_sal to it: */
- pdesc.addr = entry_point;
- pdesc.gpval = gpval;
- ia64_sal = (ia64_sal_handler) &pdesc;
-}
-
-static void __init
-check_versions (struct ia64_sal_systab *systab)
-{
- sal_revision = (systab->sal_rev_major << 8) | systab->sal_rev_minor;
- sal_version = (systab->sal_b_rev_major << 8) | systab->sal_b_rev_minor;
-
- /* Check for broken firmware */
- if ((sal_revision == SAL_VERSION_CODE(49, 29))
- && (sal_version == SAL_VERSION_CODE(49, 29)))
- {
-		/*
-		 * Old firmware for zx2000 prototypes has this weird
-		 * version number; reset it to something sane.
-		 */
- sal_revision = SAL_VERSION_CODE(2, 8);
- sal_version = SAL_VERSION_CODE(0, 0);
- }
-}
-
-static void __init
-sal_desc_entry_point (void *p)
-{
- struct ia64_sal_desc_entry_point *ep = p;
- ia64_pal_handler_init(__va_efi(ep->pal_proc));
- ia64_sal_handler_init(__va_efi(ep->sal_proc), __va_efi(ep->gp));
-}
-
-#ifdef CONFIG_SMP
-static void __init
-set_smp_redirect (int flag)
-{
-#if defined(CONFIG_HOTPLUG_CPU) && !defined(XEN)
- if (no_int_routing)
- smp_int_redirect &= ~flag;
- else
- smp_int_redirect |= flag;
-#else
-	/*
-	 * For CPU hotplug we don't want to do any chipset-supported
-	 * interrupt redirection, as that would require stopping all
-	 * interrupts and hard-binding each irq to a cpu. Later, when the
-	 * interrupt fires, we would need to set the redirect hint again
-	 * in the vector. This is cumbersome for something that the
-	 * user-mode irq balancer will solve anyway.
-	 */
-	no_int_routing = 1;
- smp_int_redirect &= ~flag;
-#endif
-}
-#else
-#define set_smp_redirect(flag) do { } while (0)
-#endif
-
-static void __init
-sal_desc_platform_feature (void *p)
-{
- struct ia64_sal_desc_platform_feature *pf = p;
- sal_platform_features = pf->feature_mask;
-
- printk(KERN_INFO "SAL Platform features:");
- if (!sal_platform_features) {
- printk(" None\n");
- return;
- }
-
- if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_BUS_LOCK)
- printk(" BusLock");
- if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT) {
- printk(" IRQ_Redirection");
- set_smp_redirect(SMP_IRQ_REDIRECTION);
- }
- if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT) {
- printk(" IPI_Redirection");
- set_smp_redirect(SMP_IPI_REDIRECTION);
- }
- if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)
- printk(" ITC_Drift");
- printk("\n");
-}
-
-#ifdef CONFIG_SMP
-static void __init
-sal_desc_ap_wakeup (void *p)
-{
- struct ia64_sal_desc_ap_wakeup *ap = p;
-
- switch (ap->mechanism) {
- case IA64_SAL_AP_EXTERNAL_INT:
- ap_wakeup_vector = ap->vector;
- printk(KERN_INFO "SAL: AP wakeup using external interrupt "
- "vector 0x%lx\n", ap_wakeup_vector);
- break;
- default:
- printk(KERN_ERR "SAL: AP wakeup mechanism unsupported!\n");
- break;
- }
-}
-
-static void __init
-chk_nointroute_opt(void)
-{
- char *cp;
- extern char saved_command_line[];
-
- for (cp = saved_command_line; *cp; ) {
- if (memcmp(cp, "nointroute", 10) == 0) {
- no_int_routing = 1;
- printk ("no_int_routing on\n");
- break;
- } else {
- while (*cp != ' ' && *cp)
- ++cp;
- while (*cp == ' ')
- ++cp;
- }
- }
-}
-
-#else
-static void __init sal_desc_ap_wakeup(void *p) { }
-#endif
-
-/*
- * HP rx5670 firmware polls for interrupts during SAL_CACHE_FLUSH by reading
- * cr.ivr, but it never writes cr.eoi. This leaves any interrupt marked as
- * "in-service" and masks other interrupts of equal or lower priority.
- *
- * HP internal defect reports: F1859, F2775, F3031.
- */
-static int sal_cache_flush_drops_interrupts;
-
-static void __init
-check_sal_cache_flush (void)
-{
- unsigned long flags, itv;
- int cpu;
- u64 vector;
-
- cpu = get_cpu();
- local_irq_save(flags);
-
- /*
- * Schedule a timer interrupt, wait until it's reported, and see if
- * SAL_CACHE_FLUSH drops it.
- */
- itv = ia64_get_itv();
- BUG_ON((itv & (1 << 16)) == 0);
-
- ia64_set_itv(IA64_TIMER_VECTOR);
- ia64_set_itm(ia64_get_itc() + 1000);
-
- while (!ia64_get_irr(IA64_TIMER_VECTOR))
- cpu_relax();
-
- ia64_sal_cache_flush(3);
-
- if (ia64_get_irr(IA64_TIMER_VECTOR)) {
- vector = ia64_get_ivr();
- ia64_eoi();
- } else {
- sal_cache_flush_drops_interrupts = 1;
- printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; "
- "PAL_CACHE_FLUSH will be used instead\n");
- ia64_eoi();
- }
-
- ia64_set_itv(itv);
- local_irq_restore(flags);
- put_cpu();
-}
-
-s64
-ia64_sal_cache_flush (u64 cache_type)
-{
- struct ia64_sal_retval isrv;
-
- if (sal_cache_flush_drops_interrupts) {
- unsigned long flags;
- u64 progress;
- s64 rc;
-
- progress = 0;
- local_irq_save(flags);
- rc = ia64_pal_cache_flush(cache_type,
- PAL_CACHE_FLUSH_INVALIDATE, &progress, NULL);
- local_irq_restore(flags);
- return rc;
- }
-
- SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
- return isrv.status;
-}
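-
-/*
- * Note (per the SAL spec; not restated elsewhere in this file): cache_type
- * 1 flushes the instruction cache, 2 the data cache, and 3 both, which is
- * why check_sal_cache_flush() above passes 3.
- */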
-
-void __init
-ia64_sal_init (struct ia64_sal_systab *systab)
-{
- char *p;
- int i;
-
- if (!systab) {
- printk(KERN_WARNING "Hmm, no SAL System Table.\n");
- return;
- }
-
-#ifdef XEN /* warning cleanup */
-	if (strncmp((char *)systab->signature, "SST_", 4) != 0)
-#else
-	if (strncmp(systab->signature, "SST_", 4) != 0)
-#endif
-		printk(KERN_ERR "bad signature in system table!\n");
-
- check_versions(systab);
-#ifdef CONFIG_SMP
- chk_nointroute_opt();
-#endif
-
- /* revisions are coded in BCD, so %x does the job for us */
- printk(KERN_INFO "SAL %x.%x: %.32s %.32s%sversion %x.%x\n",
- SAL_MAJOR(sal_revision), SAL_MINOR(sal_revision),
- systab->oem_id, systab->product_id,
- systab->product_id[0] ? " " : "",
- SAL_MAJOR(sal_version), SAL_MINOR(sal_version));
-
- p = (char *) (systab + 1);
- for (i = 0; i < systab->entry_count; i++) {
- /*
- * The first byte of each entry type contains the type
- * descriptor.
- */
- switch (*p) {
- case SAL_DESC_ENTRY_POINT:
- sal_desc_entry_point(p);
- break;
- case SAL_DESC_PLATFORM_FEATURE:
- sal_desc_platform_feature(p);
- break;
- case SAL_DESC_PTC:
- ia64_ptc_domain_info = (ia64_sal_desc_ptc_t *)p;
- break;
- case SAL_DESC_AP_WAKEUP:
- sal_desc_ap_wakeup(p);
- break;
- }
- p += SAL_DESC_SIZE(*p);
- }
-
- check_sal_cache_flush();
-}
-
-int
-ia64_sal_oemcall(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
- u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7)
-{
- if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
- return -1;
- SAL_CALL(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
- return 0;
-}
-EXPORT_SYMBOL(ia64_sal_oemcall);
-
-int
-ia64_sal_oemcall_nolock(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
- u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
- u64 arg7)
-{
- if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
- return -1;
- SAL_CALL_NOLOCK(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
- arg7);
- return 0;
-}
-EXPORT_SYMBOL(ia64_sal_oemcall_nolock);
-
-int
-ia64_sal_oemcall_reentrant(struct ia64_sal_retval *isrvp, u64 oemfunc,
- u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5,
- u64 arg6, u64 arg7)
-{
- if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
- return -1;
- SAL_CALL_REENTRANT(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
- arg7);
- return 0;
-}
-EXPORT_SYMBOL(ia64_sal_oemcall_reentrant);
diff --git a/xen/arch/ia64/linux-xen/setup.c b/xen/arch/ia64/linux-xen/setup.c
deleted file mode 100644
index e502d44757..0000000000
--- a/xen/arch/ia64/linux-xen/setup.c
+++ /dev/null
@@ -1,1056 +0,0 @@
-/*
- * Architecture-specific setup.
- *
- * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2000, 2004 Intel Corp
- * Rohit Seth <rohit.seth@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Gordon Jin <gordon.jin@intel.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- *
- * 12/26/04 S.Siddha, G.Jin, R.Seth
- * Add multi-threading and multi-core detection
- * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
- * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
- * 03/31/00 R.Seth cpu_initialized and current->processor fixes
- * 02/04/00 D.Mosberger some more get_cpuinfo fixes...
- * 02/01/00 R.Seth fixed get_cpuinfo for SMP
- * 01/07/99 S.Eranian added the support for command line argument
- * 06/24/99 W.Drummond added boot_cpu_data.
- * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()"
- */
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <linux/acpi.h>
-#include <linux/bootmem.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/shutdown.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/threads.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/efi.h>
-#include <linux/initrd.h>
-#include <linux/platform.h>
-#include <linux/pm.h>
-
-#include <asm/ia32.h>
-#include <asm/machvec.h>
-#include <asm/mca.h>
-#include <asm/meminit.h>
-#include <asm/page.h>
-#include <asm/patch.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/sal.h>
-#include <asm/sections.h>
-#include <asm/serial.h>
-#include <asm/setup.h>
-#include <asm/smp.h>
-#include <asm/system.h>
-#include <asm/unistd.h>
-#ifdef XEN
-#include <asm/vmx.h>
-#include <asm/io.h>
-#include <asm/kexec.h>
-#include <public/kexec.h>
-#include <xen/kexec.h>
-#endif
-
-#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
-# error "struct cpuinfo_ia64 too big!"
-#endif
-
-#ifdef CONFIG_SMP
-unsigned long __per_cpu_offset[NR_CPUS];
-EXPORT_SYMBOL(__per_cpu_offset);
-#endif
-
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
-#ifdef XEN
-DEFINE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
-#endif
-DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
-DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
-unsigned long ia64_cycles_per_usec;
-struct ia64_boot_param *ia64_boot_param;
-struct screen_info screen_info;
-unsigned long vga_console_iobase;
-unsigned long vga_console_membase;
-
-unsigned long ia64_max_cacheline_size;
-unsigned long ia64_iobase; /* virtual address for I/O accesses */
-EXPORT_SYMBOL(ia64_iobase);
-struct io_space io_space[MAX_IO_SPACES];
-EXPORT_SYMBOL(io_space);
-unsigned int num_io_spaces;
-
-#ifdef XEN
-extern void early_cmdline_parse(char **);
-extern unsigned int ns16550_com1_gsi;
-#endif
-
-/*
- * "flush_icache_range()" needs to know what processor dependent stride size to use
- * when it makes i-cache(s) coherent with d-caches.
- */
-#define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
-unsigned long ia64_i_cache_stride_shift = ~0;
-
-#ifdef XEN
-#define D_CACHE_STRIDE_SHIFT 5 /* Safest. */
-unsigned long ia64_d_cache_stride_shift = ~0;
-#endif
-
-/*
- * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This
- * mask specifies a mask of address bits that must be 0 in order for two buffers to be
- * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
- * address of the second buffer must be aligned to (merge_mask+1) in order to be
- * mergeable). By default, we assume there is no I/O MMU which can merge physically
- * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an I/O MMU
- * page size of 2^64.
- */
-unsigned long ia64_max_iommu_merge_mask = ~0UL;
-EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
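-
-/*
- * Illustration (hypothetical I/O MMU, not from the original file): an
- * I/O MMU with a 4KiB page size would use
- *
- *	ia64_max_iommu_merge_mask = (1UL << 12) - 1;	i.e. 0xfff
- *
- * so two buffers merge only if the first ends and the second starts on a
- * 4KiB boundary; ~0UL therefore disables merging altogether.
- */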
-
-/*
- * We use a special marker for the end of memory; it occupies the extra (+1) slot
- */
-struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
-int num_rsvd_regions;
-
-
-/*
- * Filter incoming memory segments based on the primitive map created from the boot
- * parameters. Segments contained in the map are removed from the memory ranges. A
- * caller-specified function is called with the memory ranges that remain after filtering.
- * This routine does not assume the incoming segments are sorted.
- */
-int
-filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
-{
- unsigned long range_start, range_end, prev_start;
- void (*func)(unsigned long, unsigned long, int);
- int i;
-
-#if IGNORE_PFN0
- if (start == PAGE_OFFSET) {
- printk(KERN_WARNING "warning: skipping physical page 0\n");
- start += PAGE_SIZE;
- if (start >= end) return 0;
- }
-#endif
-	/*
-	 * lowest possible address (the walker uses virtual addresses)
-	 */
- prev_start = PAGE_OFFSET;
- func = arg;
-
- for (i = 0; i < num_rsvd_regions; ++i) {
- range_start = max(start, prev_start);
- range_end = min(end, rsvd_region[i].start);
-
- if (range_start < range_end)
-#ifdef XEN
- {
- /* init_boot_pages requires "ps, pe" */
- printk("Init boot pages: 0x%lx -> 0x%lx.\n",
- __pa(range_start), __pa(range_end));
- (*func)(__pa(range_start), __pa(range_end), 0);
- }
-#else
- call_pernode_memory(__pa(range_start), range_end - range_start, func);
-#endif
-
- /* nothing more available in this segment */
- if (range_end == end) return 0;
-
- prev_start = rsvd_region[i].end;
- }
- /* end of memory marker allows full processing inside loop body */
- return 0;
-}
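-
-/*
- * Worked example (hypothetical addresses; the __pa() conversion and
- * PAGE_OFFSET bias are elided for clarity): with rsvd_region[] sorted and
- * holding [0x1000,0x2000) and [0x5000,0x6000), an incoming segment
- * [0x0000,0x7000) produces calls for [0x0000,0x1000), [0x2000,0x5000) and
- * [0x6000,0x7000); each reserved range is punched out of the segment
- * before it reaches func.
- */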
-
-static void
-sort_regions (struct rsvd_region *rsvd_region, int max)
-{
- int j;
-
-	/* simple bubble sort */
- while (max--) {
- for (j = 0; j < max; ++j) {
- if (rsvd_region[j].start > rsvd_region[j+1].start) {
- struct rsvd_region tmp;
- tmp = rsvd_region[j];
- rsvd_region[j] = rsvd_region[j + 1];
- rsvd_region[j + 1] = tmp;
- }
- }
- }
-}
-
-/**
- * reserve_memory - setup reserved memory areas
- *
- * Setup the reserved memory areas set aside for the boot parameters,
- * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
- * see include/asm-ia64/meminit.h if you need to define more.
- */
-void
-reserve_memory (void)
-{
- int n = 0;
-
- /*
- * none of the entries in this table overlap
- */
- rsvd_region[n].start = (unsigned long) ia64_boot_param;
- rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param);
- n++;
-
- rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
- rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
- n++;
-
- rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
- rsvd_region[n].end = (rsvd_region[n].start
- + strlen(__va(ia64_boot_param->command_line)) + 1);
- n++;
-
- rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
-#ifdef XEN
- /* Reserve xen image/bitmap/xen-heap */
- rsvd_region[n].end = rsvd_region[n].start + xenheap_size;
-#else
- rsvd_region[n].end = (unsigned long) ia64_imva(_end);
-#endif
- n++;
-
-#ifdef XEN
- rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->domain_start);
- rsvd_region[n].end = (rsvd_region[n].start + ia64_boot_param->domain_size);
- n++;
-#endif
-
-#if defined(XEN)||defined(CONFIG_BLK_DEV_INITRD)
- if (ia64_boot_param->initrd_start) {
- rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
- rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size;
- n++;
- }
-#endif
-
- efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
- n++;
-
-#ifdef XEN
-	/* crashkernel=size@offset specifies the size to reserve for a crash
-	 * kernel. If offset is 0, it is determined automatically.
-	 * By reserving this memory we guarantee that Linux never sets it
-	 * up as a DMA target. Useful for holding code to do something
-	 * appropriate after a kernel panic.
-	 */
- if (kexec_crash_area.size > 0) {
- if (!kexec_crash_area.start) {
- sort_regions(rsvd_region, n);
- kexec_crash_area.start =
- kdump_find_rsvd_region(kexec_crash_area.size,
- rsvd_region, n);
- }
- if (kexec_crash_area.start != ~0UL) {
- printk("Kdump: %luMB (%lukB) at 0x%lx\n",
- kexec_crash_area.size >> 20,
- kexec_crash_area.size >> 10,
- kexec_crash_area.start);
- rsvd_region[n].start =
- (unsigned long)__va(kexec_crash_area.start);
- rsvd_region[n].end =
- (unsigned long)__va(kexec_crash_area.start +
- kexec_crash_area.size);
- n++;
- }
- else {
- kexec_crash_area.size = 0;
- kexec_crash_area.start = 0;
- }
- }
-#endif
-
- /* end of memory marker */
- rsvd_region[n].start = ~0UL;
- rsvd_region[n].end = ~0UL;
- n++;
-
- num_rsvd_regions = n;
-
- sort_regions(rsvd_region, num_rsvd_regions);
-}
-
-/**
- * find_initrd - get initrd parameters from the boot parameter structure
- *
- * Grab the initrd start and end from the boot parameter struct given us by
- * the boot loader.
- */
-void
-find_initrd (void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- if (ia64_boot_param->initrd_start) {
- initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
- initrd_end = initrd_start+ia64_boot_param->initrd_size;
-
- printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
- initrd_start, ia64_boot_param->initrd_size);
- }
-#endif
-}
-
-static void __init
-io_port_init (void)
-{
- extern unsigned long ia64_iobase;
- unsigned long phys_iobase;
-
- /*
- * Set `iobase' to the appropriate address in region 6 (uncached access range).
- *
- * The EFI memory map is the "preferred" location to get the I/O port space base,
-	 * rather than relying on AR.KR0. This should become clearer in future SAL
-	 * specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
- * found in the memory map.
- */
- phys_iobase = efi_get_iobase();
- if (phys_iobase)
- /* set AR.KR0 since this is all we use it for anyway */
- ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
- else {
- phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
- printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
- "to AR.KR0\n");
- printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
- }
- ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
-
- /* setup legacy IO port space */
- io_space[0].mmio_base = ia64_iobase;
- io_space[0].sparse = 1;
- num_io_spaces = 1;
-}
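-
-/*
- * Sketch (assumes the usual ia64 sparse-space encoding, which lives in
- * asm/io.h rather than here): with io_space[0].sparse set, a legacy port
- * access such as inb(0x3f8) becomes an uncached load from roughly
- *
- *	ia64_iobase | ((0x3f8 >> 2) << 12) | (0x3f8 & 0xfff)
- *
- * i.e. each 4KiB page of the region-6 mapping carries 4 bytes of port
- * space.
- */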
-
-#ifdef XEN
-static int __init
-acpi_oem_console_setup(void)
-{
- extern struct ns16550_defaults ns16550_com1;
- efi_system_table_t *systab;
- efi_config_table_t *tables;
- struct acpi_table_rsdp *rsdp = NULL;
- struct acpi_table_xsdt *xsdt;
- struct acpi_table_header *hdr;
- int i;
-
- /* Don't duplicate setup if an HCDP table is present */
- if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
- return -ENODEV;
-
-	/* Manually walk the firmware-provided tables to get to the XSDT. */
- systab = __va(ia64_boot_param->efi_systab);
-
- if (!systab || systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
- return -ENODEV;
-
- tables = __va(systab->tables);
-
- for (i = 0 ; i < (int)systab->nr_tables && !rsdp ; i++) {
- if (efi_guidcmp(tables[i].guid, ACPI_20_TABLE_GUID) == 0)
- rsdp =
- (struct acpi_table_rsdp *)__va(tables[i].table);
- }
-
- if (!rsdp ||
- strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1))
- return -ENODEV;
-
- xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
- hdr = &xsdt->header;
-
- if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1))
- return -ENODEV;
-
- /* Looking for Fujitsu PRIMEQUEST systems */
- if (!strncmp(hdr->oem_id, "FUJITSPQ", 8) &&
- (!strncmp(hdr->oem_table_id, "PQ", 2))){
- ns16550_com1.baud = BAUD_AUTO;
- ns16550_com1.io_base = 0x3f8;
- ns16550_com1.irq = ns16550_com1_gsi = 4;
- return 0;
- }
-
- /*
- * Looking for Intel Tiger systems
- * Tiger 2: SR870BH2
- * Tiger 4: SR870BN4
- */
- if (!strncmp(hdr->oem_id, "INTEL", 5)) {
- if (!strncmp(hdr->oem_table_id, "SR870BH2", 8) ||
- !strncmp(hdr->oem_table_id, "SR870BN4", 8)) {
- ns16550_com1.baud = BAUD_AUTO;
- ns16550_com1.io_base = 0x2f8;
- ns16550_com1.irq = 3;
- return 0;
- } else {
- ns16550_com1.baud = BAUD_AUTO;
- ns16550_com1.io_base = 0x3f8;
- ns16550_com1.irq = ns16550_com1_gsi = 4;
- return 0;
- }
- }
- return -ENODEV;
-}
-#endif
-
-/**
- * early_console_setup - setup debugging console
- *
- * Consoles started here require little enough setup that we can start using
- * them very early in the boot process, either right after the machine
- * vector initialization, or even before if the drivers can detect their hw.
- *
- * Returns non-zero if a console couldn't be setup.
- */
-static inline int __init
-early_console_setup (char *cmdline)
-{
- int earlycons = 0;
-
-#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
- {
- extern int sn_serial_console_early_setup(void);
- if (!sn_serial_console_early_setup())
- earlycons++;
- }
-#endif
-#ifdef CONFIG_EFI_PCDP
- if (!efi_setup_pcdp_console(cmdline))
- earlycons++;
-#endif
-#ifdef CONFIG_SERIAL_8250_CONSOLE
- if (!early_serial_console_init(cmdline))
- earlycons++;
-#endif
-
-#ifdef XEN
- if (!acpi_oem_console_setup())
- earlycons++;
-#endif
- return (earlycons) ? 0 : -1;
-}
-
-static inline void
-mark_bsp_online (void)
-{
-#ifdef CONFIG_SMP
- /* If we register an early console, allow CPU 0 to printk */
- cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
-#endif
-}
-
-#ifdef CONFIG_SMP
-static void
-check_for_logical_procs (void)
-{
- pal_logical_to_physical_t info;
- s64 status;
-
- status = ia64_pal_logical_to_phys(0, &info);
- if (status == -1) {
- printk(KERN_INFO "No logical to physical processor mapping "
- "available\n");
- return;
- }
- if (status) {
- printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
- status);
- return;
- }
-	/*
-	 * Total number of siblings that the BSP has, though not all
-	 * of them may have booted successfully. The correct number of
-	 * siblings booted is in info.overview_num_log.
-	 */
- smp_num_siblings = info.overview_tpc;
- smp_num_cpucores = info.overview_cpp;
-}
-#endif
-
-void __init
-#ifdef XEN
-early_setup_arch (char **cmdline_p)
-#else
-setup_arch (char **cmdline_p)
-#endif
-{
- unw_init();
-
- ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
-
- *cmdline_p = __va(ia64_boot_param->command_line);
-#ifndef XEN
- strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
-#else
- early_cmdline_parse(cmdline_p);
- cmdline_parse(*cmdline_p);
-#endif
-
- efi_init();
- io_port_init();
-
-#ifdef CONFIG_IA64_GENERIC
- {
- const char *mvec_name = strstr (*cmdline_p, "machvec=");
- char str[64];
-
- if (mvec_name) {
- const char *end;
- size_t len;
-
- mvec_name += 8;
- end = strchr (mvec_name, ' ');
- if (end)
- len = end - mvec_name;
- else
- len = strlen (mvec_name);
- len = min(len, sizeof (str) - 1);
-			strlcpy (str, mvec_name, len + 1);
- mvec_name = str;
- } else
- mvec_name = acpi_get_sysname();
- machvec_init(mvec_name);
- }
-#endif
-
- if (early_console_setup(*cmdline_p) == 0)
- mark_bsp_online();
-
-#ifdef CONFIG_ACPI_BOOT
- /* Initialize the ACPI boot-time table parser */
- acpi_table_init();
-# ifdef CONFIG_ACPI_NUMA
- acpi_numa_init();
-# endif
-#else
-# ifdef CONFIG_SMP
- smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
-# endif
-#endif /* CONFIG_ACPI_BOOT */
-
-#ifdef XEN
-}
-
-void __init
-late_setup_arch (char **cmdline_p)
-{
-#endif
-#ifndef XEN
- find_memory();
-
- /* process SAL system table: */
- ia64_sal_init(efi.sal_systab);
-#endif
-
-#ifdef CONFIG_SMP
-#ifdef XEN
- init_smp_config ();
-#endif
-
- cpu_physical_id(0) = hard_smp_processor_id();
-
- if (!zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, 0)) ||
- !zalloc_cpumask_var(&per_cpu(cpu_core_mask, 0)))
- panic("No memory for boot CPU sibling/core maps\n");
-
- cpumask_set_cpu(0, per_cpu(cpu_sibling_mask, 0));
- cpumask_set_cpu(0, per_cpu(cpu_core_mask, 0));
-
- check_for_logical_procs();
- if (smp_num_cpucores > 1)
- printk(KERN_INFO
- "cpu package is Multi-Core capable: number of cores=%d\n",
- smp_num_cpucores);
- if (smp_num_siblings > 1)
- printk(KERN_INFO
- "cpu package is Multi-Threading capable: number of siblings=%d\n",
- smp_num_siblings);
-#endif
-
- cpu_init(); /* initialize the bootstrap CPU */
-
-#ifdef CONFIG_ACPI_BOOT
- acpi_boot_init();
-#endif
-
-#ifdef CONFIG_VT
- if (!conswitchp) {
-# if defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
-# endif
-# if defined(CONFIG_VGA_CONSOLE)
- /*
- * Non-legacy systems may route legacy VGA MMIO range to system
- * memory. vga_con probes the MMIO hole, so memory looks like
- * a VGA device to it. The EFI memory map can tell us if it's
- * memory so we can avoid this problem.
- */
- if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
- conswitchp = &vga_con;
-# endif
- }
-#endif
-
- /* enable IA-64 Machine Check Abort Handling unless disabled */
- if (!strstr(saved_command_line, "nomca"))
- ia64_mca_init();
-
- platform_setup(cmdline_p);
- paging_init();
-}
-
-#ifndef XEN
-/*
- * Display cpu info for all cpu's.
- */
-static int
-show_cpuinfo (struct seq_file *m, void *v)
-{
-#ifdef CONFIG_SMP
-# define lpj c->loops_per_jiffy
-# define cpunum c->cpu
-#else
-# define lpj loops_per_jiffy
-# define cpunum 0
-#endif
- static struct {
- unsigned long mask;
- const char *feature_name;
- } feature_bits[] = {
- { 1UL << 0, "branchlong" },
- { 1UL << 1, "spontaneous deferral"},
- { 1UL << 2, "16-byte atomic ops" }
- };
- char family[32], features[128], *cp, sep;
- struct cpuinfo_ia64 *c = v;
- unsigned long mask;
- int i;
-
- mask = c->features;
-
- switch (c->family) {
- case 0x07: memcpy(family, "Itanium", 8); break;
- case 0x1f: memcpy(family, "Itanium 2", 10); break;
- default: snprintf(family, sizeof(family), "%u", c->family); break;
- }
-
- /* build the feature string: */
- memcpy(features, " standard", 10);
- cp = features;
- sep = 0;
- for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
- if (mask & feature_bits[i].mask) {
- if (sep)
- *cp++ = sep;
- sep = ',';
- *cp++ = ' ';
-			strlcpy(cp, feature_bits[i].feature_name, sizeof(features) - (cp - features));
- cp += strlen(feature_bits[i].feature_name);
- mask &= ~feature_bits[i].mask;
- }
- }
- if (mask) {
- /* print unknown features as a hex value: */
- if (sep)
- *cp++ = sep;
- snprintf(cp, sizeof(features) - (cp - features), " 0x%lx", mask);
- }
-
- seq_printf(m,
- "processor : %d\n"
- "vendor : %s\n"
- "arch : IA-64\n"
- "family : %s\n"
- "model : %u\n"
- "revision : %u\n"
- "archrev : %u\n"
- "features :%s\n" /* don't change this---it _is_ right! */
- "cpu number : %lu\n"
- "cpu regs : %u\n"
- "cpu MHz : %lu.%06lu\n"
- "itc MHz : %lu.%06lu\n"
- "BogoMIPS : %lu.%02lu\n",
- cpunum, c->vendor, family, c->model, c->revision, c->archrev,
- features, c->ppn, c->number,
- c->proc_freq / 1000000, c->proc_freq % 1000000,
- c->itc_freq / 1000000, c->itc_freq % 1000000,
- lpj*HZ/500000, (lpj*HZ/5000) % 100);
-#ifdef CONFIG_SMP
- seq_printf(m, "siblings : %u\n", c->num_log);
- if (c->threads_per_core > 1 || c->cores_per_socket > 1)
- seq_printf(m,
- "physical id: %u\n"
- "core id : %u\n"
- "thread id : %u\n",
- c->socket_id, c->core_id, c->thread_id);
-#endif
- seq_printf(m,"\n");
-
- return 0;
-}
-
-static void *
-c_start (struct seq_file *m, loff_t *pos)
-{
-#ifdef CONFIG_SMP
- while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
- ++*pos;
-#endif
- return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
-}
-
-static void *
-c_next (struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return c_start(m, pos);
-}
-
-static void
-c_stop (struct seq_file *m, void *v)
-{
-}
-
-struct seq_operations cpuinfo_op = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
- .show = show_cpuinfo
-};
-#endif /* XEN */
-
-void
-identify_cpu (struct cpuinfo_ia64 *c)
-{
- union {
- unsigned long bits[5];
- struct {
- /* id 0 & 1: */
- char vendor[16];
-
- /* id 2 */
- u64 ppn; /* processor serial number */
-
- /* id 3: */
- unsigned number : 8;
- unsigned revision : 8;
- unsigned model : 8;
- unsigned family : 8;
- unsigned archrev : 8;
- unsigned reserved : 24;
-
- /* id 4: */
- u64 features;
- } field;
- } cpuid;
- pal_vm_info_1_u_t vm1;
- pal_vm_info_2_u_t vm2;
- pal_status_t status;
- unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */
- int i;
-
- for (i = 0; i < 5; ++i)
- cpuid.bits[i] = ia64_get_cpuid(i);
-
- memcpy(c->vendor, cpuid.field.vendor, 16);
-#ifdef CONFIG_SMP
- c->cpu = smp_processor_id();
-
-	/* the default values below will be overwritten by identify_siblings()
-	 * for Multi-Threading/Multi-Core capable CPUs
-	 */
- c->threads_per_core = c->cores_per_socket = c->num_log = 1;
- c->socket_id = -1;
-
- identify_siblings(c);
-#endif
- c->ppn = cpuid.field.ppn;
- c->number = cpuid.field.number;
- c->revision = cpuid.field.revision;
- c->model = cpuid.field.model;
- c->family = cpuid.field.family;
- c->archrev = cpuid.field.archrev;
- c->features = cpuid.field.features;
-
- status = ia64_pal_vm_summary(&vm1, &vm2);
- if (status == PAL_STATUS_SUCCESS) {
- impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
- phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
- }
- c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
- c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
-}
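-
-/*
- * Worked example (using the Itanium defaults above): impl_va_msb = 50
- * gives
- *
- *	unimpl_va_mask = ~((7L << 61) | ((1L << 51) - 1))
- *
- * i.e. a mask of the unimplemented virtual-address bits 51..60, between
- * the implemented range and the region bits 61..63. Likewise
- * phys_addr_size = 44 makes unimpl_pa_mask cover physical bits 44..62
- * (bit 63 is left out as the uncached-attribute bit).
- */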
-
-void
-setup_per_cpu_areas (void)
-{
- /* start_kernel() requires this... */
-}
-
-/*
- * Calculate the max. cache line size.
- *
- * In addition, the minimum of the i-cache stride sizes is calculated for
- * "flush_icache_range()".
- */
-static void
-get_max_cacheline_size (void)
-{
- unsigned long line_size, max = 1;
- u64 l, levels, unique_caches;
- pal_cache_config_info_t cci;
- s64 status;
-
- status = ia64_pal_cache_summary(&levels, &unique_caches);
- if (status != 0) {
- printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
- __FUNCTION__, status);
- max = SMP_CACHE_BYTES;
- /* Safest setup for "flush_icache_range()" */
- ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
-#ifdef XEN
- ia64_d_cache_stride_shift = D_CACHE_STRIDE_SHIFT;
-#endif
- goto out;
- }
-
- for (l = 0; l < levels; ++l) {
- status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
- &cci);
- if (status != 0) {
- printk(KERN_ERR
- "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
- __FUNCTION__, l, status);
- max = SMP_CACHE_BYTES;
- /* The safest setup for "flush_icache_range()" */
- cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
- cci.pcci_unified = 1;
- }
-#ifdef XEN
- if (cci.pcci_stride < ia64_d_cache_stride_shift)
- ia64_d_cache_stride_shift = cci.pcci_stride;
-#endif
- line_size = 1 << cci.pcci_line_size;
- if (line_size > max)
- max = line_size;
- if (!cci.pcci_unified) {
- status = ia64_pal_cache_config_info(l,
- /* cache_type (instruction)= */ 1,
- &cci);
- if (status != 0) {
- printk(KERN_ERR
- "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
- __FUNCTION__, l, status);
- /* The safest setup for "flush_icache_range()" */
- cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
- }
- }
- if (cci.pcci_stride < ia64_i_cache_stride_shift)
- ia64_i_cache_stride_shift = cci.pcci_stride;
- }
- out:
- if (max > ia64_max_cacheline_size)
- ia64_max_cacheline_size = max;
-#ifdef XEN
- if (ia64_d_cache_stride_shift > ia64_i_cache_stride_shift)
- ia64_d_cache_stride_shift = ia64_i_cache_stride_shift;
-#endif
-
-}
-
-/*
- * cpu_init() initializes state that is per-CPU. This function acts
- * as a 'CPU state barrier', nothing should get across.
- */
-void
-cpu_init (void)
-{
- extern void __devinit ia64_mmu_init (void *);
- unsigned long num_phys_stacked;
-#ifndef XEN
- pal_vm_info_2_u_t vmi;
- unsigned int max_ctx;
-#endif
- struct cpuinfo_ia64 *cpu_info;
- void *cpu_data;
-
- cpu_data = per_cpu_init();
-
-#ifdef XEN
- printk(XENLOG_DEBUG "cpu_init: current=%p\n", current);
-#endif
-
- /*
- * We set ar.k3 so that assembly code in MCA handler can compute
- * physical addresses of per cpu variables with a simple:
- * phys = ar.k3 + &per_cpu_var
- */
- ia64_set_kr(IA64_KR_PER_CPU_DATA,
- ia64_tpa(cpu_data) - (long) __per_cpu_start);
-
- get_max_cacheline_size();
-
- /*
- * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
- * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it
- * depends on the data returned by identify_cpu(). We break the dependency by
- * accessing cpu_data() through the canonical per-CPU address.
- */
- cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
- identify_cpu(cpu_info);
-
-#ifdef CONFIG_MCKINLEY
- {
-# define FEATURE_SET 16
- struct ia64_pal_retval iprv;
-
- if (cpu_info->family == 0x1f) {
- PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
- if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
- PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
- (iprv.v1 | 0x80), FEATURE_SET, 0);
- }
- }
-#endif
-
- /* Clear the stack memory reserved for pt_regs: */
- memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
-
- ia64_set_kr(IA64_KR_FPU_OWNER, 0);
-
- /*
- * Initialize the page-table base register to a global
-	 * directory with all zeroes. This ensures that we can handle
-	 * TLB misses to user address-space even before we have created the
- * first user address-space. This may happen, e.g., due to
- * aggressive use of lfetch.fault.
- */
- ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
-
- /*
- * Initialize default control register to defer speculative faults except
- * for those arising from TLB misses, which are not deferred. The
- * kernel MUST NOT depend on a particular setting of these bits (in other words,
- * the kernel must have recovery code for all speculative accesses). Turn on
- * dcr.lc as per recommendation by the architecture team. Most IA-32 apps
- * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
- * be fine).
- */
-#ifdef XEN
- ia64_setreg(_IA64_REG_CR_DCR, IA64_DEFAULT_DCR_BITS);
-#else
- ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
- | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
-#endif
-#ifndef XEN
- atomic_inc(&init_mm.mm_count);
- current->active_mm = &init_mm;
-#endif
-#ifndef XEN
- if (current->mm)
- BUG();
-#endif
-
-
-#ifdef XEN
- ia64_fph_enable();
- __ia64_init_fpu();
-#endif
-
- ia64_mmu_init(ia64_imva(cpu_data));
- ia64_mca_cpu_init(ia64_imva(cpu_data));
-
-#ifdef CONFIG_IA32_SUPPORT
- ia32_cpu_init();
-#endif
-
-	/* Clear ITC to eliminate sched_clock() overflows in human time. */
- ia64_set_itc(0);
-
- /* disable all local interrupt sources: */
- ia64_set_itv(1 << 16);
- ia64_set_lrr0(1 << 16);
- ia64_set_lrr1(1 << 16);
- ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
- ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
-
- /* clear TPR & XTP to enable all interrupt classes: */
- ia64_setreg(_IA64_REG_CR_TPR, 0);
-#ifdef CONFIG_SMP
- normal_xtp();
-#endif
-
-#ifndef XEN
- /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
- if (ia64_pal_vm_summary(NULL, &vmi) == 0)
- max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
- else {
- printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
- max_ctx = (1U << 15) - 1; /* use architected minimum */
- }
- while (max_ctx < ia64_ctx.max_ctx) {
- unsigned int old = ia64_ctx.max_ctx;
- if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
- break;
- }
-#endif
-
- if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
- printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
- "stacked regs\n");
- num_phys_stacked = 96;
- }
- /* size of physical stacked register partition plus 8 bytes: */
- __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
- platform_cpu_init();
-#ifndef XEN
- pm_idle = default_idle;
-#endif
-
-#ifdef XEN
- /* surrender usage of kernel registers to domain, use percpu area instead */
- __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
- __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
- __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
- __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
- __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
- __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
-#endif
-}
-
-#ifndef XEN
-void
-check_bugs (void)
-{
- ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
- (unsigned long) __end___mckinley_e9_bundles);
-}
-#endif
diff --git a/xen/arch/ia64/linux-xen/smp.c b/xen/arch/ia64/linux-xen/smp.c
deleted file mode 100644
index 526e6eec01..0000000000
--- a/xen/arch/ia64/linux-xen/smp.c
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * SMP Support
- *
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Lots of stuff stolen from arch/alpha/kernel/smp.c
- *
- * 01/05/16 Rohit Seth <rohit.seth@intel.com> IA64-SMP functions. Reorganized
- * the existing code (on the lines of x86 port).
- * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy
- * calibration on each CPU.
- * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
- * 00/03/31 Rohit Seth <rohit.seth@intel.com> Fixes for Bootstrap Processor
- * & cpu_online_map now gets done here (instead of setup.c)
- * 99/10/05 davidm Update to bring it in sync with new command-line processing
- * scheme.
- * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
- * smp_call_function_single to resend IPI on timeouts
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/mm.h>
-#include <linux/cache.h>
-#include <linux/delay.h>
-#include <linux/efi.h>
-#include <linux/bitops.h>
-
-#include <asm/atomic.h>
-#include <asm/current.h>
-#include <asm/delay.h>
-#include <asm/machvec.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/sal.h>
-#include <asm/system.h>
-#include <asm/tlbflush.h>
-#include <asm/unistd.h>
-#include <asm/mca.h>
-#ifdef XEN
-#include <xen/errno.h>
-#include <asm/vhpt.h>
-#include <asm/hw_irq.h>
-#endif
-
-#ifdef XEN
-#if CONFIG_SMP
-/* Note: this appears to be used on ia64 even if !CONFIG_SMP. */
-void smp_send_event_check_mask(const cpumask_t *mask)
-{
- int cpu;
-
- /* Not for me. */
- if (cpumask_subset(mask, cpumask_of(smp_processor_id())))
- return;
-
- for (cpu = 0; cpu < NR_CPUS; ++cpu)
- if (cpumask_test_cpu(cpu, mask) && cpu != smp_processor_id())
- platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
-}
-#endif
-#endif
-
-#ifdef CONFIG_SMP /* ifdef XEN */
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
- */
-static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- long wait;
- atomic_t started;
- atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
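-
-/*
- * Protocol sketch (annotation): a sender publishes its on-stack
- * call_data_struct through call_data under call_lock, raises
- * IPI_CALL_FUNC and spins on ->started. Each target copies func/info
- * out, increments ->started (after which the sender's stack frame may be
- * reused unless wait was set), runs func, and then increments ->finished
- * so a waiting sender knows the call completed everywhere.
- */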
-
-#define IPI_CALL_FUNC 0
-#define IPI_CPU_STOP 1
-#define IPI_STATE_DUMP 2
-
-/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
-static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
-
-extern void cpu_halt (void);
-
-#ifdef XEN
-/* work around for spinlock irq check. */
-void
-lock_ipi_calllock(unsigned long *flags)
-{
- spin_lock_irqsave(&call_lock, *flags);
-}
-
-void
-unlock_ipi_calllock(unsigned long flags)
-{
- spin_unlock_irqrestore(&call_lock, flags);
-}
-#else
-void
-lock_ipi_calllock(void)
-{
- spin_lock_irq(&call_lock);
-}
-
-void
-unlock_ipi_calllock(void)
-{
- spin_unlock_irq(&call_lock);
-}
-#endif
-
-static void
-stop_this_cpu (void)
-{
- /*
- * Remove this CPU:
- */
- cpumask_clear_cpu(smp_processor_id(), &cpu_online_map);
- max_xtp();
- local_irq_disable();
- cpu_halt();
-}
-
-void
-cpu_die(void)
-{
- max_xtp();
- local_irq_disable();
- cpu_halt();
- /* Should never be here */
- BUG();
- for (;;);
-}
-
-irqreturn_t
-handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
-{
- int this_cpu = get_cpu();
- unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
- unsigned long ops;
-
-#ifdef XEN
- perfc_incr(ipis);
-#endif
- mb(); /* Order interrupt and bit testing. */
- while ((ops = xchg(pending_ipis, 0)) != 0) {
- mb(); /* Order bit clearing and data access. */
- do {
- unsigned long which;
-
- which = ffz(~ops);
- ops &= ~(1 << which);
-
- switch (which) {
- case IPI_CALL_FUNC:
- {
- struct call_data_struct *data;
- void (*func)(void *info);
- void *info;
- int wait;
-
- /* release the 'pointer lock' */
- data = (struct call_data_struct *) call_data;
- func = data->func;
- info = data->info;
- wait = data->wait;
-
- mb();
- atomic_inc(&data->started);
- /*
- * At this point the structure may be gone unless
- * wait is true.
- */
- (*func)(info);
-
- /* Notify the sending CPU that the task is done. */
- mb();
- if (wait)
- atomic_inc(&data->finished);
- }
- break;
-
- case IPI_CPU_STOP:
- stop_this_cpu();
- break;
-
- case IPI_STATE_DUMP:
- dump_execstate(regs);
- break;
-
- default:
- printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
- break;
- }
- } while (ops);
- mb(); /* Order data access and bit testing. */
- }
- put_cpu();
-#ifndef XEN
- return IRQ_HANDLED;
-#endif
-}
-
-/*
- * Called with preemption disabled.
- */
-static inline void
-send_IPI_single (int dest_cpu, int op)
-{
- set_bit(op, &per_cpu(ipi_operation, dest_cpu));
- platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
-}
-
-/*
- * Called with preemption disabled.
- */
-static inline void
-send_IPI_allbutself (int op)
-{
- unsigned int i;
-
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i) && i != smp_processor_id())
- send_IPI_single(i, op);
- }
-}
-
-/*
- * Called with preemption disabled.
- */
-static inline void
-send_IPI_all (int op)
-{
- int i;
-
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
- send_IPI_single(i, op);
-}
-
-/*
- * Called with preemption disabled.
- */
-static inline void
-send_IPI_self (int op)
-{
- send_IPI_single(smp_processor_id(), op);
-}
-
-#ifndef XEN
-/*
- * Called with preemption disabled.
- */
-void
-smp_send_reschedule (int cpu)
-{
- platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
-}
-
-void
-smp_flush_tlb_all (void)
-{
- on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
-}
-
-void
-smp_flush_tlb_mm (struct mm_struct *mm)
-{
- preempt_disable();
- /* this happens for the common case of a single-threaded fork(): */
- if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
- {
- local_finish_flush_tlb_mm(mm);
- preempt_enable();
- return;
- }
-
- preempt_enable();
- /*
- * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
- * have been running in the address space. It's not clear that this is worth the
- * trouble though: to avoid races, we have to raise the IPI on the target CPU
- * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
- * rather trivial.
- */
- on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
-}
-#endif
-
-/*
- * Run a function on another CPU
- *  <cpuid>	The CPU to run the function on.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <wait>	If true, wait until the function has completed on the other CPU.
- *  [RETURNS]   0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>,
- * or is executing or has already executed it.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
- int wait)
-{
- struct call_data_struct data;
- int cpus = 1;
- int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
- if (cpuid == me) {
- printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
- put_cpu();
- return -EBUSY;
- }
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
-#ifdef XEN
- spin_lock(&call_lock);
-#else
- spin_lock_bh(&call_lock);
-#endif
-
- call_data = &data;
- mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
- send_IPI_single(cpuid, IPI_CALL_FUNC);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- call_data = NULL;
-
-#ifdef XEN
- spin_unlock(&call_lock);
-#else
- spin_unlock_bh(&call_lock);
-#endif
- put_cpu();
- return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
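-
-/*
- * Illustrative use (hypothetical callback, not part of this file):
- *
- *	static void drain_queue(void *unused) { ... }
- *	...
- *	if (smp_call_function_single(2, drain_queue, NULL, 1) == 0)
- *		printk("CPU 2 drained its queue\n");
- *
- * The callback runs from interrupt context on the target CPU, so it must
- * be fast and must not sleep.
- */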
-
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- * [SUMMARY] Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until the remote CPUs are nearly ready to execute <func>,
- * or are executing or have already executed it.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-void
-smp_call_function (void (*func) (void *info), void *info, int wait)
-{
- struct call_data_struct data;
- int cpus = num_online_cpus()-1;
-
- if (!cpus)
- return;
-
- /* Can deadlock when called with interrupts disabled */
-#ifdef XEN
- if (irqs_disabled()) {
- printk("smp_call_function called with interrupts disabled...");
- printk("enabling interrupts\n");
- local_irq_enable();
- }
-#else
- WARN_ON(irqs_disabled());
-#endif
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- spin_lock(&call_lock);
-
- call_data = &data;
- mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
- send_IPI_allbutself(IPI_CALL_FUNC);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- call_data = NULL;
-
- spin_unlock(&call_lock);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-#ifdef XEN
-void
-on_selected_cpus(const cpumask_t *selected, void (*func) (void *info),
- void *info, int wait)
-{
- struct call_data_struct data;
- unsigned int cpu, nr_cpus = cpumask_weight(selected);
-
- ASSERT(local_irq_is_enabled());
-
- if (!nr_cpus)
- return;
-
- data.func = func;
- data.info = info;
- data.wait = wait;
- atomic_set(&data.started, 0);
- atomic_set(&data.finished, 0);
-
- spin_lock(&call_lock);
-
- call_data = &data;
- wmb();
-
- for_each_cpu(cpu, selected)
- send_IPI_single(cpu, IPI_CALL_FUNC);
-
- while (atomic_read(wait ? &data.finished : &data.started) != nr_cpus)
- cpu_relax();
-
- spin_unlock(&call_lock);
-}
-#endif
-
-/*
- * this function calls the 'stop' function on all other CPUs in the system.
- */
-void
-smp_send_stop (void)
-{
- send_IPI_allbutself(IPI_CPU_STOP);
-}
-
-void
-smp_send_state_dump (unsigned int cpu)
-{
- send_IPI_single(cpu, IPI_STATE_DUMP);
-}
-
-int __init
-setup_profiling_timer (unsigned int multiplier)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_SMP ifdef XEN */
diff --git a/xen/arch/ia64/linux-xen/smpboot.c b/xen/arch/ia64/linux-xen/smpboot.c
deleted file mode 100644
index f9ee4fd531..0000000000
--- a/xen/arch/ia64/linux-xen/smpboot.c
+++ /dev/null
@@ -1,977 +0,0 @@
-/*
- * SMP boot-related support
- *
- * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2001, 2004-2005 Intel Corp
- * Rohit Seth <rohit.seth@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Gordon Jin <gordon.jin@intel.com>
- * Ashok Raj <ashok.raj@intel.com>
- *
- * 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here.
- * 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code.
- * 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence.
- * smp_boot_cpus()/smp_commence() is replaced by
- * smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
- * 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
- * 04/12/26 Jin Gordon <gordon.jin@intel.com>
- * 04/12/26 Rohit Seth <rohit.seth@intel.com>
- * Add multi-threading and multi-core detection
- * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
- * Setup cpu_sibling_map and cpu_core_map
- */
-#include <linux/config.h>
-
-#include <linux/module.h>
-#include <linux/acpi.h>
-#include <linux/bootmem.h>
-#include <linux/cpu.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/mm.h>
-#include <linux/notifier.h> /* hg add me */
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/spinlock.h>
-#include <linux/efi.h>
-#include <linux/percpu.h>
-#include <linux/bitops.h>
-
-#include <asm/atomic.h>
-#include <asm/cache.h>
-#include <asm/current.h>
-#include <asm/delay.h>
-#include <asm/ia32.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/machvec.h>
-#include <asm/mca.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/sal.h>
-#include <asm/system.h>
-#include <asm/tlbflush.h>
-#include <asm/unistd.h>
-
-#ifdef XEN
-#include <xen/domain.h>
-#include <asm/hw_irq.h>
-#include <asm/vmx.h>
-#ifndef CONFIG_SMP
-cpumask_t cpu_online_map = CPU_MASK_CPU0;
-EXPORT_SYMBOL(cpu_online_map);
-#endif
-#endif
-
-#ifdef CONFIG_SMP /* ifdef XEN */
-
-#define SMP_DEBUG 0
-
-#if SMP_DEBUG
-#define Dprintk(x...) printk(x)
-#else
-#define Dprintk(x...)
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * Store all idle threads; they can be reused instead of creating
- * new ones. This also avoids complicated thread-destroy handling
- * for idle threads.
- */
-struct task_struct *idle_thread_array[NR_CPUS];
-
-/*
- * Global array allocated for NR_CPUS at boot time
- */
-struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
-
-/*
- * start_ap in head.S uses this to store the currently booting
- * cpu's info.
- */
-struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
-
-#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
-
-#define get_idle_for_cpu(x) (idle_thread_array[(x)])
-#define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p))
-
-#else
-
-#define get_idle_for_cpu(x) (NULL)
-#define set_idle_for_cpu(x,p)
-#define set_brendez_area(x)
-#endif
-
-
-/*
- * ITC synchronization related stuff:
- */
-#define MASTER 0
-#define SLAVE (SMP_CACHE_BYTES/8)
-
-#define NUM_ROUNDS 64 /* magic value */
-#define NUM_ITERS 5 /* likewise */
-
-static DEFINE_SPINLOCK(itc_sync_lock);
-static volatile unsigned long go[SLAVE + 1];
-
-#define DEBUG_ITC_SYNC 0
-
-extern void __devinit calibrate_delay (void);
-extern void start_ap (void);
-extern unsigned long ia64_iobase;
-
-task_t *task_for_booting_cpu;
-
-/*
- * State for each CPU
- */
-DEFINE_PER_CPU(int, cpu_state);
-
-/* Bitmasks of currently online, and possible CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_mask);
-int smp_num_siblings = 1;
-int smp_num_cpucores = 1;
-
-/* which logical CPU number maps to which CPU (physical APIC ID) */
-volatile int ia64_cpu_to_sapicid[NR_CPUS];
-EXPORT_SYMBOL(ia64_cpu_to_sapicid);
-
-static volatile cpumask_t cpu_callin_map;
-
-struct smp_boot_data smp_boot_data __initdata;
-
-unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */
-
-char __initdata no_int_routing;
-
-unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
-
-static int __init
-nointroute (char *str)
-{
- no_int_routing = 1;
- printk ("no_int_routing on\n");
- return 1;
-}
-
-__setup("nointroute", nointroute);
-
-static void fix_b0_for_bsp(void)
-{
-#ifdef CONFIG_HOTPLUG_CPU
- int cpuid;
- static int fix_bsp_b0 = 1;
-
- cpuid = smp_processor_id();
-
- /*
- * Cache the b0 value on the first AP that comes up
- */
- if (!(fix_bsp_b0 && cpuid))
- return;
-
- sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
- printk ("Fixed BSP b0 value from CPU %d\n", cpuid);
-
- fix_bsp_b0 = 0;
-#endif
-}
-
-void
-sync_master (void *arg)
-{
- unsigned long flags, i;
-
- go[MASTER] = 0;
-
- local_irq_save(flags);
- {
- for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
- while (!go[MASTER])
- cpu_relax();
- go[MASTER] = 0;
- go[SLAVE] = ia64_get_itc();
- }
- }
- local_irq_restore(flags);
-}
-
-/*
- * Return the number of cycles by which our itc differs from the itc on the master
- * (time-keeper) CPU. A positive number indicates our itc is ahead of the master,
- * negative that it is behind.
- */
-static inline long
-#ifdef XEN /* warning cleanup */
-get_delta (unsigned long *rt, unsigned long *master)
-#else
-get_delta (long *rt, long *master)
-#endif
-{
- unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
- unsigned long tcenter, t0, t1, tm;
- long i;
-
- for (i = 0; i < NUM_ITERS; ++i) {
- t0 = ia64_get_itc();
- go[MASTER] = 1;
- while (!(tm = go[SLAVE]))
- cpu_relax();
- go[SLAVE] = 0;
- t1 = ia64_get_itc();
-
- if (t1 - t0 < best_t1 - best_t0)
- best_t0 = t0, best_t1 = t1, best_tm = tm;
- }
-
- *rt = best_t1 - best_t0;
- *master = best_tm - best_t0;
-
- /* average best_t0 and best_t1 without overflow: */
- tcenter = (best_t0/2 + best_t1/2);
- if (best_t0 % 2 + best_t1 % 2 == 2)
- ++tcenter;
- return tcenter - best_tm;
-}
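-
-/*
- * Worked example (hypothetical cycle counts): best_t0 = 7, best_t1 = 9
- * gives tcenter = 7/2 + 9/2 = 3 + 4, plus 1 because both halves were
- * rounded down, i.e. 8, the true midpoint (7+9)/2, computed without ever
- * forming the sum 7+9, which for real itc values could overflow.
- */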
-
-/*
- * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
- * (normally the time-keeper CPU). We use a closed loop to eliminate the possibility of
- * unaccounted-for errors (such as getting a machine check in the middle of a calibration
- * step). The basic idea is for the slave to ask the master what itc value it has and to
- * read its own itc before and after the master responds. Each iteration gives us three
- * timestamps:
- *
- * slave master
- *
- * t0 ---\
- * ---\
- * --->
- * tm
- * /---
- * /---
- * t1 <---
- *
- *
- * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
- * and t1. If we achieve this, the clocks are synchronized provided the interconnect
- * between the slave and the master is symmetric. Even if the interconnect were
- * asymmetric, we would still know that the synchronization error is smaller than the
- * roundtrip latency (t1 - t0).
- *
- * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
- * within one or two cycles. However, we can only *guarantee* that the synchronization is
- * accurate to within a round-trip time, which is typically in the range of several
- * hundred cycles (e.g., ~500 cycles). In practice, this means that the ITCs are usually
- * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
- * than half a microsecond or so.
- */
-void
-ia64_sync_itc (unsigned int master)
-{
- long i, delta, adj, adjust_latency = 0, done = 0;
- unsigned long flags, rt, master_time_stamp, bound;
-#if DEBUG_ITC_SYNC
- struct {
- long rt; /* roundtrip time */
- long master; /* master's timestamp */
- long diff; /* difference between midpoint and master's timestamp */
- long lat; /* estimate of itc adjustment latency */
- } t[NUM_ROUNDS];
-#endif
-
- /*
- * Make sure local timer ticks are disabled while we sync. If
- * they were enabled, we'd have to worry about nasty issues
- * like setting the ITC ahead of (or a long time before) the
- * next scheduled tick.
- */
- BUG_ON((ia64_get_itv() & (1 << 16)) == 0);
-
- go[MASTER] = 1;
-
- if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
- printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
- return;
- }
-
- while (go[MASTER])
- cpu_relax(); /* wait for master to be ready */
-
- spin_lock_irqsave(&itc_sync_lock, flags);
- {
- for (i = 0; i < NUM_ROUNDS; ++i) {
- delta = get_delta(&rt, &master_time_stamp);
- if (delta == 0) {
- done = 1; /* let's lock on to this... */
- bound = rt;
- }
-
- if (!done) {
- if (i > 0) {
- adjust_latency += -delta;
- adj = -delta + adjust_latency/4;
- } else
- adj = -delta;
-
- ia64_set_itc(ia64_get_itc() + adj);
- }
-#if DEBUG_ITC_SYNC
- t[i].rt = rt;
- t[i].master = master_time_stamp;
- t[i].diff = delta;
- t[i].lat = adjust_latency/4;
-#endif
- }
- }
- spin_unlock_irqrestore(&itc_sync_lock, flags);
-
-#if DEBUG_ITC_SYNC
- for (i = 0; i < NUM_ROUNDS; ++i)
- printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
- t[i].rt, t[i].master, t[i].diff, t[i].lat);
-#endif
-
- printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
- "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
-}
-
-/*
- * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
- */
-static inline void __devinit
-smp_setup_percpu_timer (void)
-{
-}
-
-static void __devinit
-smp_callin (void)
-{
-#ifdef XEN
- /* work around for spinlock irq assert. */
- unsigned long flags;
-#endif
- int cpuid, phys_id;
- extern void ia64_init_itm(void);
-
-#ifdef CONFIG_PERFMON
- extern void pfm_init_percpu(void);
-#endif
-
- cpuid = smp_processor_id();
- phys_id = hard_smp_processor_id();
-
- if (cpu_online(cpuid)) {
- printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
- phys_id, cpuid);
- BUG();
- }
-
- fix_b0_for_bsp();
-
-#ifdef XEN
- notify_cpu_starting(cpuid);
- lock_ipi_calllock(&flags);
-#else
- lock_ipi_calllock();
-#endif
- cpumask_set_cpu(cpuid, &cpu_online_map);
-#ifdef XEN
- unlock_ipi_calllock(flags);
-#else
- unlock_ipi_calllock();
-#endif
- per_cpu(cpu_state, cpuid) = CPU_ONLINE;
-
- smp_setup_percpu_timer();
-
- ia64_mca_cmc_vector_setup(); /* Setup vector on AP */
-
-#ifdef CONFIG_PERFMON
- pfm_init_percpu();
-#endif
-
- local_irq_enable();
-
- if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
- /*
- * Synchronize the ITC with the BP. Need to do this after irqs are
-		 * enabled because ia64_sync_itc() calls smp_call_function_single(),
-		 * which calls spin_unlock_bh(), which calls local_bh_enable(),
-		 * which bugs out if irqs are not enabled...
- */
- Dprintk("Going to syncup ITC with BP.\n");
- ia64_sync_itc(0);
- }
-
- /*
- * Get our bogomips.
- */
- ia64_init_itm();
-#ifndef XEN
- calibrate_delay();
-#endif
- local_cpu_data->loops_per_jiffy = loops_per_jiffy;
-
-#ifdef CONFIG_IA32_SUPPORT
- ia32_gdt_init();
-#endif
-
- /*
- * Allow the master to continue.
- */
- cpumask_set_cpu(cpuid, &cpu_callin_map);
- Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
-}
-
-
-/*
- * Activate a secondary processor. head.S calls this.
- */
-int __devinit
-start_secondary (void *unused)
-{
- /* Early console may use I/O ports */
- ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
-#ifndef XEN
- Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
- efi_map_pal_code();
-#endif
- cpu_init();
- smp_callin();
-
-#ifdef XEN
- if (vmx_enabled)
- vmx_init_env(0, 0);
-
- startup_cpu_idle_loop();
-#else
- cpu_idle();
-#endif
- return 0;
-}
-
-struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
-{
- return NULL;
-}
-
-#ifndef XEN
-struct create_idle {
- struct task_struct *idle;
- struct completion done;
- int cpu;
-};
-
-void
-do_fork_idle(void *_c_idle)
-{
- struct create_idle *c_idle = _c_idle;
-
- c_idle->idle = fork_idle(c_idle->cpu);
- complete(&c_idle->done);
-}
-#endif
-
-static int __devinit
-do_boot_cpu (int sapicid, int cpu)
-{
- int timeout;
-#ifndef XEN
- struct create_idle c_idle = {
- .cpu = cpu,
- .done = COMPLETION_INITIALIZER(c_idle.done),
- };
- DECLARE_WORK(work, do_fork_idle, &c_idle);
-
- c_idle.idle = get_idle_for_cpu(cpu);
- if (c_idle.idle) {
- init_idle(c_idle.idle, cpu);
- goto do_rest;
- }
-
- /*
-	 * We can't use kernel_thread, since we must avoid rescheduling the child.
- */
- if (!keventd_up() || current_is_keventd())
- work.func(work.data);
- else {
- schedule_work(&work);
- wait_for_completion(&c_idle.done);
- }
-
- if (IS_ERR(c_idle.idle))
- panic("failed fork for CPU %d", cpu);
-
- set_idle_for_cpu(cpu, c_idle.idle);
-
-do_rest:
- task_for_booting_cpu = c_idle.idle;
-#else
- struct vcpu *v;
-
- v = idle_vcpu[cpu];
- BUG_ON(v == NULL);
-
- //printf ("do_boot_cpu: cpu=%d, domain=%p, vcpu=%p\n", cpu, idle, v);
-
- task_for_booting_cpu = (task_t *)v;
-
- /* Set cpu number. */
- get_thread_info(v)->cpu = cpu;
-#endif
-
- Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
-
- set_brendez_area(cpu);
- platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
-
-	/*
-	 * Wait 10s total (100000 iterations x 100us) for the AP to start
-	 */
- Dprintk("Waiting on callin_map ...");
- for (timeout = 0; timeout < 100000; timeout++) {
- if (cpumask_test_cpu(cpu, &cpu_callin_map))
- break; /* It has booted */
- udelay(100);
- }
- Dprintk("\n");
-
- if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
- printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
- ia64_cpu_to_sapicid[cpu] = -1;
- cpumask_clear_cpu(cpu, &cpu_online_map); /* was set in smp_callin() */
- return -EINVAL;
- }
- return 0;
-}
-
-#ifndef XEN
-static int __init
-decay (char *str)
-{
- int ticks;
- get_option (&str, &ticks);
- return 1;
-}
-
-__setup("decay=", decay);
-#endif
-
-/*
- * Initialize the logical CPU number to SAPICID mapping
- */
-void __init
-smp_build_cpu_map (void)
-{
- int sapicid, cpu, i;
- int boot_cpu_id = hard_smp_processor_id();
-
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- ia64_cpu_to_sapicid[cpu] = -1;
-#ifndef XEN
-#ifdef CONFIG_HOTPLUG_CPU
- cpu_set(cpu, cpu_possible_map);
-#endif
-#endif
- }
-
- ia64_cpu_to_sapicid[0] = boot_cpu_id;
- cpumask_clear(&cpu_present_map);
- cpumask_set_cpu(0, &cpu_present_map);
- cpumask_set_cpu(0, &cpu_possible_map);
- for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
- sapicid = smp_boot_data.cpu_phys_id[i];
- if (sapicid == boot_cpu_id)
- continue;
- cpumask_set_cpu(cpu, &cpu_present_map);
- cpumask_set_cpu(cpu, &cpu_possible_map);
- ia64_cpu_to_sapicid[cpu] = sapicid;
- cpu++;
- }
-}
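-
-/*
- * An illustrative example (hypothetical SAPIC ids, not from any real
- * firmware table): with boot_cpu_id == 0x10 and smp_boot_data.cpu_phys_id
- * listing { 0x10, 0x12, 0x14 }, the loop above yields
- *
- *	ia64_cpu_to_sapicid[0] = 0x10	(boot CPU, set before the loop)
- *	ia64_cpu_to_sapicid[1] = 0x12
- *	ia64_cpu_to_sapicid[2] = 0x14
- *
- * with cpus 0..2 marked present/possible and every other slot left -1.
- */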
-
-/*
- * Cycle through the APs sending Wakeup IPIs to boot each.
- */
-void __init
-smp_prepare_cpus (unsigned int max_cpus)
-{
- int boot_cpu_id = hard_smp_processor_id();
-
- /*
- * Initialize the per-CPU profiling counter/multiplier
- */
-
- smp_setup_percpu_timer();
-
- /*
- * We have the boot CPU online for sure.
- */
- cpumask_set_cpu(0, &cpu_online_map);
- cpumask_set_cpu(0, &cpu_callin_map);
-
- local_cpu_data->loops_per_jiffy = loops_per_jiffy;
- ia64_cpu_to_sapicid[0] = boot_cpu_id;
-
- printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
-
- current_thread_info()->cpu = 0;
-
- /*
- * If SMP should be disabled, then really disable it!
- */
- if (!max_cpus) {
- printk(KERN_INFO "SMP mode deactivated.\n");
- cpumask_clear(&cpu_online_map);
- cpumask_clear(&cpu_present_map);
- cpumask_clear(&cpu_possible_map);
- cpumask_set_cpu(0, &cpu_online_map);
- cpumask_set_cpu(0, &cpu_present_map);
- cpumask_set_cpu(0, &cpu_possible_map);
- return;
- }
-}
-
-void __devinit smp_prepare_boot_cpu(void)
-{
- cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
- cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
- per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-}
-
-/*
- * mt_info[] is a temporary store for all info returned by
- * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
- * specific cpu comes up.
- */
-static struct {
- __u32 socket_id;
- __u16 core_id;
- __u16 thread_id;
- __u16 proc_fixed_addr;
- __u8 valid;
-} mt_info[NR_CPUS] __devinitdata;
-
-#if defined(XEN) && !defined(CONFIG_HOTPLUG_CPU)
-static inline void
-remove_from_mtinfo(int cpu)
-{
- int i;
-
- for_each_possible_cpu(i)
- if (mt_info[i].valid && mt_info[i].socket_id ==
- cpu_data(cpu)->socket_id)
- mt_info[i].valid = 0;
-}
-
-static inline void
-clear_cpu_sibling_map(int cpu)
-{
- int i;
-
- for_each_cpu(i, per_cpu(cpu_sibling_mask, cpu))
- cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, i));
- for_each_cpu(i, per_cpu(cpu_core_mask, cpu))
- cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, i));
-
- cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
- cpumask_clear(per_cpu(cpu_core_mask, cpu));
-}
-
-static void
-remove_siblinginfo(int cpu)
-{
- int last = 0;
-
- if (cpu_data(cpu)->threads_per_core == 1 &&
- cpu_data(cpu)->cores_per_socket == 1) {
- cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, cpu));
- cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
- return;
- }
-
- last = (cpumask_weight(per_cpu(cpu_core_mask, cpu)) == 1);
-
-	/* remove it from all sibling maps */
- clear_cpu_sibling_map(cpu);
-
-	/* if this cpu is the last in the core group, remove all its info
-	 * from the mt_info structure
- */
- if (last)
- remove_from_mtinfo(cpu);
-}
-
-extern void fixup_irqs(void);
-/* must be called with cpucontrol mutex held */
-void __cpu_disable(void)
-{
- int cpu = smp_processor_id();
-
- remove_siblinginfo(cpu);
- cpumask_clear_cpu(cpu, &cpu_online_map);
-#ifndef XEN
- fixup_irqs();
-#endif
- local_flush_tlb_all();
- cpumask_clear_cpu(cpu, &cpu_callin_map);
-}
-#else /* !CONFIG_HOTPLUG_CPU */
-void __cpu_disable(void)
-{
- BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-#ifdef CONFIG_HOTPLUG_CPU
-void __cpu_die(unsigned int cpu)
-{
- unsigned int i;
-
- for (i = 0; i < 100; i++) {
- /* They ack this in play_dead by setting CPU_DEAD */
- if (per_cpu(cpu_state, cpu) == CPU_DEAD)
- {
- printk ("CPU %d is now offline\n", cpu);
- return;
- }
-#ifdef XEN
- udelay(100 * 1000);
-#else
- msleep(100);
-#endif
- }
- printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-}
-#else /* !CONFIG_HOTPLUG_CPU */
-void __cpu_die(unsigned int cpu)
-{
- /* We said "no" in __cpu_disable */
- BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-void
-smp_cpus_done(void)
-{
- int cpu;
- unsigned long bogosum = 0;
-
- /*
- * Allow the user to impress friends.
- */
-
- for (cpu = 0; cpu < NR_CPUS; cpu++)
- if (cpu_online(cpu))
- bogosum += cpu_data(cpu)->loops_per_jiffy;
-
- printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
- (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
-}
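-
-/*
- * A quick check of the formatting arithmetic above: loops_per_jiffy
- * counts delay-loop iterations per timer tick and one BogoMIPS is
- * 500000 iterations per second, so
- *
- *	BogoMIPS = bogosum * HZ / 500000 = bogosum / (500000/HZ)
- *
- * gives the integer part, while bogosum/(5000/HZ) % 100 recovers the
- * first two decimal places.
- */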
-
-static inline void __devinit
-set_cpu_sibling_map(int cpu)
-{
- int i;
-
- for_each_online_cpu(i) {
- if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
- cpumask_set_cpu(i, per_cpu(cpu_core_mask, cpu));
- cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, i));
- if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
- cpumask_set_cpu(i, per_cpu(cpu_sibling_mask, cpu));
- cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, i));
- }
- }
- }
-}
-
-int __devinit
-__cpu_up (unsigned int cpu)
-{
- int ret;
- int sapicid;
-
- sapicid = ia64_cpu_to_sapicid[cpu];
- if (sapicid == -1)
- return -EINVAL;
-
- /*
-	 * Already booted CPU? Not valid anymore, since we don't
-	 * do the idle-loop tight-spin anymore.
- */
- if (cpumask_test_cpu(cpu, &cpu_callin_map))
- return -EINVAL;
-
- if (!per_cpu(cpu_sibling_mask, cpu) &&
- !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)))
- return -ENOMEM;
-
- if (!per_cpu(cpu_core_mask, cpu) &&
- !zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)))
- return -ENOMEM;
-
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
- /* Processor goes to start_secondary(), sets online flag */
- ret = do_boot_cpu(sapicid, cpu);
- if (ret < 0)
- return ret;
-
- if (cpu_data(cpu)->threads_per_core == 1 &&
- cpu_data(cpu)->cores_per_socket == 1) {
- cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
- cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, cpu));
- return 0;
- }
-
- set_cpu_sibling_map(cpu);
-
- return 0;
-}
-
-/*
- * Assume that CPUs have been discovered by some platform-dependent interface. For
- * SoftSDV/Lion, that would be ACPI.
- *
- * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
- */
-void __init
-init_smp_config(void)
-{
- struct fptr {
- unsigned long fp;
- unsigned long gp;
- } *ap_startup;
- long sal_ret;
-
- /* Tell SAL where to drop the AP's. */
- ap_startup = (struct fptr *) start_ap;
- sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
- ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
- if (sal_ret < 0)
- printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
- ia64_sal_strerror(sal_ret));
-}
-
-static inline int __devinit
-check_for_mtinfo_index(void)
-{
- int i;
-
- for_each_possible_cpu(i)
- if (!mt_info[i].valid)
- return i;
-
- return -1;
-}
-
-/*
- * Search the mt_info to find out if this socket's cid/tid information is
- * cached or not. If the socket exists, fill in the core_id and thread_id
- * in cpuinfo.
- */
-static int __devinit
-check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
-{
- int i;
- __u32 sid = c->socket_id;
-
- for_each_possible_cpu(i) {
- if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
- && mt_info[i].socket_id == sid) {
- c->core_id = mt_info[i].core_id;
- c->thread_id = mt_info[i].thread_id;
- return 1; /* not a new socket */
- }
- }
- return 0;
-}
-
-/*
- * identify_siblings(cpu) gets called from identify_cpu. This populates the
- * information related to logical execution units in the per_cpu_data structure.
- */
-void __devinit
-identify_siblings(struct cpuinfo_ia64 *c)
-{
- s64 status;
- u16 pltid;
- u64 proc_fixed_addr;
- int count, i;
- pal_logical_to_physical_t info;
-
- if (smp_num_cpucores == 1 && smp_num_siblings == 1)
- return;
-
- if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
- printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
- status);
- return;
- }
- if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
- printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
- return;
- }
- if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
- printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
- return;
- }
-
- c->socket_id = (pltid << 8) | info.overview_ppid;
- c->cores_per_socket = info.overview_cpp;
- c->threads_per_core = info.overview_tpc;
- count = c->num_log = info.overview_num_log;
-
- /* If the thread and core id information is already cached, then
- * we will simply update cpu_info and return. Otherwise, we will
- * do the PAL calls and cache core and thread id's of all the siblings.
- */
- if (check_for_new_socket(proc_fixed_addr, c))
- return;
-
- for (i = 0; i < count; i++) {
- int index;
-
- if (i && (status = ia64_pal_logical_to_phys(i, &info))
- != PAL_STATUS_SUCCESS) {
- printk(KERN_ERR "ia64_pal_logical_to_phys failed"
- " with %ld\n", status);
- return;
- }
- if (info.log2_la == proc_fixed_addr) {
- c->core_id = info.log1_cid;
- c->thread_id = info.log1_tid;
- }
-
- index = check_for_mtinfo_index();
- /* We will not do the mt_info caching optimization in this case.
- */
- if (index < 0)
- continue;
-
- mt_info[index].valid = 1;
- mt_info[index].socket_id = c->socket_id;
- mt_info[index].core_id = info.log1_cid;
- mt_info[index].thread_id = info.log1_tid;
- mt_info[index].proc_fixed_addr = info.log2_la;
- }
-}
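-
-/*
- * A minimal sketch (illustrative helper, not in the original file) of the
- * socket_id packing used above: SAL's platform id occupies the high bits
- * and PAL's physical package id the low 8 bits.
- */
-static inline u32
-make_socket_id (u16 pltid, u8 ppid)
-{
-	return ((u32) pltid << 8) | ppid;
-}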
-#endif /* CONFIG_SMP ifdef XEN */
diff --git a/xen/arch/ia64/linux-xen/sn/Makefile b/xen/arch/ia64/linux-xen/sn/Makefile
deleted file mode 100644
index 96dd76f238..0000000000
--- a/xen/arch/ia64/linux-xen/sn/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y += kernel
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/Makefile b/xen/arch/ia64/linux-xen/sn/kernel/Makefile
deleted file mode 100644
index dc0d142533..0000000000
--- a/xen/arch/ia64/linux-xen/sn/kernel/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-y += sn2_smp.o
-obj-y += setup.o
-obj-y += iomv.o
-obj-y += irq.o
-obj-y += io_init.o
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/README.origin b/xen/arch/ia64/linux-xen/sn/kernel/README.origin
deleted file mode 100644
index 312a90add1..0000000000
--- a/xen/arch/ia64/linux-xen/sn/kernel/README.origin
+++ /dev/null
@@ -1,12 +0,0 @@
-# Source files in this directory are near-identical copies of linux-2.6.19
-# files:
-
-# NOTE: ALL changes to these files should be clearly marked
-# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
-# easily updated to future versions of the corresponding Linux files.
-
-io_init.c -> linux/arch/ia64/sn/kernel/io_init.c
-iomv.c -> linux/arch/ia64/sn/kernel/iomv.c
-irq.c -> linux/arch/ia64/sn/kernel/irq.c
-setup.c -> linux/arch/ia64/sn/kernel/setup.c
-sn2_smp.c -> linux/arch/ia64/sn/kernel/sn2/sn2_smp.c
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/io_init.c b/xen/arch/ia64/linux-xen/sn/kernel/io_init.c
deleted file mode 100644
index ca0b66d0d8..0000000000
--- a/xen/arch/ia64/linux-xen/sn/kernel/io_init.c
+++ /dev/null
@@ -1,793 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/bootmem.h>
-#include <linux/nodemask.h>
-#ifdef XEN
-#include <linux/init.h>
-#endif
-#include <asm/sn/types.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/sn_feature_sets.h>
-#include <asm/sn/geo.h>
-#include <asm/sn/io.h>
-#include <asm/sn/l1.h>
-#include <asm/sn/module.h>
-#include <asm/sn/pcibr_provider.h>
-#include <asm/sn/pcibus_provider_defs.h>
-#include <asm/sn/pcidev.h>
-#include <asm/sn/simulator.h>
-#include <asm/sn/sn_sal.h>
-#ifndef XEN
-#include <asm/sn/tioca_provider.h>
-#include <asm/sn/tioce_provider.h>
-#endif
-#ifdef XEN
-#include "asm/sn/hubdev.h"
-#include "asm/sn/xwidgetdev.h"
-#else
-#include "xtalk/hubdev.h"
-#include "xtalk/xwidgetdev.h"
-#endif
-
-
-extern void sn_init_cpei_timer(void);
-extern void register_sn_procfs(void);
-#ifdef XEN
-#define pci_dev_get(dev) do{}while(0)
-extern void sn_irq_lh_init(void);
-#endif
-
-static struct list_head sn_sysdata_list;
-
-/* sysdata list struct */
-struct sysdata_el {
- struct list_head entry;
- void *sysdata;
-};
-
-struct slab_info {
- struct hubdev_info hubdev;
-};
-
-struct brick {
- moduleid_t id; /* Module ID of this module */
- struct slab_info slab_info[MAX_SLABS + 1];
-};
-
-int sn_ioif_inited; /* SN I/O infrastructure initialized? */
-
-struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
-
-static int max_segment_number; /* Default highest segment number */
-static int max_pcibus_number = 255; /* Default highest pci bus number */
-
-/*
- * Hooks and struct for unsupported pci providers
- */
-
-static dma_addr_t
-sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
-{
- return 0;
-}
-
-static void
-sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
-{
- return;
-}
-
-static void *
-sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
-{
- return NULL;
-}
-
-static struct sn_pcibus_provider sn_pci_default_provider = {
- .dma_map = sn_default_pci_map,
- .dma_map_consistent = sn_default_pci_map,
- .dma_unmap = sn_default_pci_unmap,
- .bus_fixup = sn_default_pci_bus_fixup,
-};
-
-/*
- * Retrieve the DMA Flush List given nasid, widget, and device.
- * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
- */
-static inline u64
-sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
- u64 address)
-{
- struct ia64_sal_retval ret_stuff;
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
-
- SAL_CALL_NOLOCK(ret_stuff,
- (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
- (u64) nasid, (u64) widget_num,
- (u64) device_num, (u64) address, 0, 0, 0);
- return ret_stuff.status;
-}
-
-/*
- * Retrieve the hub device info structure for the given nasid.
- */
-static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
-{
- struct ia64_sal_retval ret_stuff;
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
-
- SAL_CALL_NOLOCK(ret_stuff,
- (u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
- (u64) handle, (u64) address, 0, 0, 0, 0, 0);
- return ret_stuff.v0;
-}
-
-/*
- * Retrieve the pci bus information given the bus number.
- */
-static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
-{
- struct ia64_sal_retval ret_stuff;
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
-
- SAL_CALL_NOLOCK(ret_stuff,
- (u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
- (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
- return ret_stuff.v0;
-}
-
-/*
- * Retrieve the pci device information given the bus and device|function number.
- */
-static inline u64
-sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
- u64 sn_irq_info)
-{
- struct ia64_sal_retval ret_stuff;
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
-
- SAL_CALL_NOLOCK(ret_stuff,
- (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
- (u64) segment, (u64) bus_number, (u64) devfn,
- (u64) pci_dev,
- sn_irq_info, 0, 0);
- return ret_stuff.v0;
-}
-
-#ifndef XEN
-/*
- * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
- * device.
- */
-inline struct pcidev_info *
-sn_pcidev_info_get(struct pci_dev *dev)
-{
- struct pcidev_info *pcidev;
-
- list_for_each_entry(pcidev,
- &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) {
- if (pcidev->pdi_linux_pcidev == dev) {
- return pcidev;
- }
- }
- return NULL;
-}
-
-/* Older PROM flush WAR
- *
- * 01/16/06 -- This WAR will be in place until a new official PROM is released.
- * Additionally note that the struct sn_flush_device_war also has to be
- * removed from arch/ia64/sn/include/xtalk/hubdev.h
- */
-static u8 war_implemented = 0;
-
-static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
- struct sn_flush_device_common *common)
-{
- struct sn_flush_device_war *war_list;
- struct sn_flush_device_war *dev_entry;
- struct ia64_sal_retval isrv = {0,0,0,0};
-
- if (!war_implemented) {
- printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
- "PROM flush WAR\n");
- war_implemented = 1;
- }
-
- war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
- if (!war_list)
- BUG();
-
- SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
- nasid, widget, __pa(war_list), 0, 0, 0 ,0);
- if (isrv.status)
- panic("sn_device_fixup_war failed: %s\n",
- ia64_sal_strerror(isrv.status));
-
- dev_entry = war_list + device;
- memcpy(common,dev_entry, sizeof(*common));
- kfree(war_list);
-
- return isrv.status;
-}
-
-/*
- * sn_fixup_ionodes() - This routine initializes the HUB data structure for
- * each node in the system.
- */
-static void __init sn_fixup_ionodes(void)
-{
- struct sn_flush_device_kernel *sn_flush_device_kernel;
- struct sn_flush_device_kernel *dev_entry;
- struct hubdev_info *hubdev;
- u64 status;
- u64 nasid;
- int i, widget, device, size;
-
- /*
- * Get SGI Specific HUB chipset information.
- * Inform Prom that this kernel can support domain bus numbering.
- */
- for (i = 0; i < num_cnodes; i++) {
- hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
- nasid = cnodeid_to_nasid(i);
- hubdev->max_segment_number = 0xffffffff;
- hubdev->max_pcibus_number = 0xff;
- status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
- if (status)
- continue;
-
- /* Save the largest Domain and pcibus numbers found. */
- if (hubdev->max_segment_number) {
- /*
- * Dealing with a Prom that supports segments.
- */
- max_segment_number = hubdev->max_segment_number;
- max_pcibus_number = hubdev->max_pcibus_number;
- }
-
- /* Attach the error interrupt handlers */
- if (nasid & 1)
- ice_error_init(hubdev);
- else
- hub_error_init(hubdev);
-
- for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
- hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
-
- if (!hubdev->hdi_flush_nasid_list.widget_p)
- continue;
-
- size = (HUB_WIDGET_ID_MAX + 1) *
- sizeof(struct sn_flush_device_kernel *);
- hubdev->hdi_flush_nasid_list.widget_p =
- kzalloc(size, GFP_KERNEL);
- if (!hubdev->hdi_flush_nasid_list.widget_p)
- BUG();
-
- for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
- size = DEV_PER_WIDGET *
- sizeof(struct sn_flush_device_kernel);
- sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
- if (!sn_flush_device_kernel)
- BUG();
-
- dev_entry = sn_flush_device_kernel;
- for (device = 0; device < DEV_PER_WIDGET;
- device++,dev_entry++) {
- size = sizeof(struct sn_flush_device_common);
- dev_entry->common = kzalloc(size, GFP_KERNEL);
- if (!dev_entry->common)
- BUG();
-
- if (sn_prom_feature_available(
- PRF_DEVICE_FLUSH_LIST))
- status = sal_get_device_dmaflush_list(
- nasid, widget, device,
- (u64)(dev_entry->common));
- else
-#ifdef XEN
- BUG();
-#else
- status = sn_device_fixup_war(nasid,
- widget, device,
- dev_entry->common);
-#endif
- if (status != SALRET_OK)
- panic("SAL call failed: %s\n",
- ia64_sal_strerror(status));
-
- spin_lock_init(&dev_entry->sfdl_flush_lock);
- }
-
- if (sn_flush_device_kernel)
- hubdev->hdi_flush_nasid_list.widget_p[widget] =
- sn_flush_device_kernel;
- }
- }
-}
-
-/*
- * sn_pci_window_fixup() - Create a pci_window for each device resource.
- *			      Until ACPI support is added, we need this code
- *			      to set up pci_windows for use by
- * pcibios_bus_to_resource(),
- * pcibios_resource_to_bus(), etc.
- */
-static void
-sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
- s64 * pci_addrs)
-{
- struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
- unsigned int i;
- unsigned int idx;
- unsigned int new_count;
- struct pci_window *new_window;
-
- if (count == 0)
- return;
- idx = controller->windows;
- new_count = controller->windows + count;
- new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
- if (new_window == NULL)
- BUG();
- if (controller->window) {
- memcpy(new_window, controller->window,
- sizeof(struct pci_window) * controller->windows);
- kfree(controller->window);
- }
-
- /* Setup a pci_window for each device resource. */
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- if (pci_addrs[i] == -1)
- continue;
-
- new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
- new_window[idx].resource = dev->resource[i];
- idx++;
- }
-
- controller->windows = new_count;
- controller->window = new_window;
-}
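-
-/*
- * A worked example of the window offset above (hypothetical addresses):
- * if a BAR is 0x1000 in bus space (pci_addrs[i]) but the kernel sees the
- * resource at 0xc0001000 (dev->resource[i].start), then
- *
- *	offset = 0xc0001000 - 0x1000 = 0xc0000000
- *
- * which is what pcibios_resource_to_bus()/pcibios_bus_to_resource() add
- * or subtract when translating between the two address spaces.
- */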
-
-void sn_pci_unfixup_slot(struct pci_dev *dev)
-{
- struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
-
- sn_irq_unfixup(dev);
- pci_dev_put(host_pci_dev);
- pci_dev_put(dev);
-}
-#endif
-
-#ifndef XEN
-/*
- * sn_pci_fixup_slot() - This routine sets up a slot's resources
- * consistent with the Linux PCI abstraction layer. Resources acquired
- * from our PCI provider include PIO maps to BAR space and interrupt
- * objects.
- */
-void sn_pci_fixup_slot(struct pci_dev *dev)
-{
- unsigned int count = 0;
- int idx;
- int segment = pci_domain_nr(dev->bus);
- int status = 0;
- struct pcibus_bussoft *bs;
- struct pci_bus *host_pci_bus;
- struct pci_dev *host_pci_dev;
- struct pcidev_info *pcidev_info;
- s64 pci_addrs[PCI_ROM_RESOURCE + 1];
- struct sn_irq_info *sn_irq_info;
- unsigned long size;
- unsigned int bus_no, devfn;
-
- pci_dev_get(dev); /* for the sysdata pointer */
- pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
- if (!pcidev_info)
- BUG(); /* Cannot afford to run out of memory */
-
- sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
- if (!sn_irq_info)
- BUG(); /* Cannot afford to run out of memory */
-
- /* Call to retrieve pci device information needed by kernel. */
- status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
- dev->devfn,
- (u64) __pa(pcidev_info),
- (u64) __pa(sn_irq_info));
- if (status)
- BUG(); /* Cannot get platform pci device information */
-
- /* Add pcidev_info to list in sn_pci_controller struct */
- list_add_tail(&pcidev_info->pdi_list,
- &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info));
-
- /* Copy over PIO Mapped Addresses */
- for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
- unsigned long start, end, addr;
-
- if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
- pci_addrs[idx] = -1;
- continue;
- }
-
- start = dev->resource[idx].start;
- end = dev->resource[idx].end;
- size = end - start;
- if (size == 0) {
- pci_addrs[idx] = -1;
- continue;
- }
- pci_addrs[idx] = start;
- count++;
- addr = pcidev_info->pdi_pio_mapped_addr[idx];
- addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
- dev->resource[idx].start = addr;
- dev->resource[idx].end = addr + size;
-#ifndef XEN
- if (dev->resource[idx].flags & IORESOURCE_IO)
- dev->resource[idx].parent = &ioport_resource;
- else
- dev->resource[idx].parent = &iomem_resource;
-#endif
- }
- /* Create a pci_window in the pci_controller struct for
- * each device resource.
- */
- if (count > 0)
- sn_pci_window_fixup(dev, count, pci_addrs);
-
- /*
- * Using the PROMs values for the PCI host bus, get the Linux
- * PCI host_pci_dev struct and set up host bus linkages
- */
-
- bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
- devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
- host_pci_bus = pci_find_bus(segment, bus_no);
- host_pci_dev = pci_get_slot(host_pci_bus, devfn);
-
- pcidev_info->host_pci_dev = host_pci_dev;
- pcidev_info->pdi_linux_pcidev = dev;
- pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
- bs = SN_PCIBUS_BUSSOFT(dev->bus);
- pcidev_info->pdi_pcibus_info = bs;
-
- if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
- SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
- } else {
- SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
- }
-
- /* Only set up IRQ stuff if this device has a host bus context */
- if (bs && sn_irq_info->irq_irq) {
- pcidev_info->pdi_sn_irq_info = sn_irq_info;
- dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
- sn_irq_fixup(dev, sn_irq_info);
- } else {
- pcidev_info->pdi_sn_irq_info = NULL;
- kfree(sn_irq_info);
- }
-}
-#endif
-
-/*
- * sn_pci_controller_fixup() - This routine sets up a bus's resources
- * consistent with the Linux PCI abstraction layer.
- */
-void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
-{
- int status;
- int nasid, cnode;
- struct pci_controller *controller;
- struct sn_pci_controller *sn_controller;
- struct pcibus_bussoft *prom_bussoft_ptr;
- struct hubdev_info *hubdev_info;
- void *provider_soft;
- struct sn_pcibus_provider *provider;
-
- status = sal_get_pcibus_info((u64) segment, (u64) busnum,
- (u64) ia64_tpa(&prom_bussoft_ptr));
- if (status > 0)
-		return; /* bus # does not exist */
- prom_bussoft_ptr = __va(prom_bussoft_ptr);
-
- /* Allocate a sn_pci_controller, which has a pci_controller struct
- * as the first member.
- */
- sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL);
- if (!sn_controller)
- BUG();
- INIT_LIST_HEAD(&sn_controller->pcidev_info);
- controller = &sn_controller->pci_controller;
- controller->segment = segment;
-
-#ifndef XEN
- if (bus == NULL) {
- bus = pci_scan_bus(busnum, &pci_root_ops, controller);
- if (bus == NULL)
- goto error_return; /* error, or bus already scanned */
- bus->sysdata = NULL;
- }
-
- if (bus->sysdata)
- goto error_return; /* sysdata already alloc'd */
-
- /*
- * Per-provider fixup. Copies the contents from prom to local
- * area and links SN_PCIBUS_BUSSOFT().
- */
-
- if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES)
- goto error_return; /* unsupported asic type */
-
- if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
- goto error_return; /* no further fixup necessary */
-
-#endif
- provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
- if (provider == NULL)
-		goto error_return; /* no provider registered for this asic */
-
- bus->sysdata = controller;
- if (provider->bus_fixup)
- provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
- else
- provider_soft = NULL;
-
- if (provider_soft == NULL) {
- /* fixup failed or not applicable */
- bus->sysdata = NULL;
- goto error_return;
- }
-
- /*
- * Setup pci_windows for legacy IO and MEM space.
- * (Temporary until ACPI support is in place.)
- */
- controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL);
- if (controller->window == NULL)
- BUG();
- controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io;
- controller->window[0].resource.name = "legacy_io";
- controller->window[0].resource.flags = IORESOURCE_IO;
- controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
- controller->window[0].resource.end =
- controller->window[0].resource.start + 0xffff;
-#ifndef XEN
- controller->window[0].resource.parent = &ioport_resource;
-#endif
- controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
- controller->window[1].resource.name = "legacy_mem";
- controller->window[1].resource.flags = IORESOURCE_MEM;
- controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
- controller->window[1].resource.end =
- controller->window[1].resource.start + (1024 * 1024) - 1;
-#ifndef XEN
- controller->window[1].resource.parent = &iomem_resource;
-#endif
- controller->windows = 2;
-
- /*
- * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
- * after this point.
- */
-
- PCI_CONTROLLER(bus)->platform_data = provider_soft;
- nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
- cnode = nasid_to_cnodeid(nasid);
- hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
- SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
- &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
-
- /*
- * If the node information we obtained during the fixup phase is invalid
- * then set controller->node to -1 (undetermined)
- */
- if (controller->node >= num_online_nodes()) {
- struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
-
-		printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u "
-				"L_IO=%lx L_MEM=%lx BASE=%lx\n",
- b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
- b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
-		printk(KERN_WARNING "on node %d but only %d nodes online. "
-			"Association set to undetermined.\n",
- controller->node, num_online_nodes());
- controller->node = -1;
- }
- return;
-
-error_return:
-
- kfree(sn_controller);
- return;
-}
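-
-/*
- * The two legacy windows set up above encode a simple translation rule
- * (a sketch; "busaddr" is a hypothetical bus-relative port or address):
- *
- *	io_sysaddr  = prom_bussoft_ptr->bs_legacy_io  + busaddr;
- *	mem_sysaddr = prom_bussoft_ptr->bs_legacy_mem + busaddr;
- *
- * i.e. window[n].offset is what gets added to a bus address to reach the
- * 64KB legacy I/O range or the 1MB legacy memory range of this bus.
- */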
-
-#ifndef XEN
-void sn_bus_store_sysdata(struct pci_dev *dev)
-{
- struct sysdata_el *element;
-
- element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
- if (!element) {
- dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
- return;
- }
- element->sysdata = SN_PCIDEV_INFO(dev);
- list_add(&element->entry, &sn_sysdata_list);
-}
-
-void sn_bus_free_sysdata(void)
-{
- struct sysdata_el *element;
- struct list_head *list, *safe;
-
- list_for_each_safe(list, safe, &sn_sysdata_list) {
- element = list_entry(list, struct sysdata_el, entry);
- list_del(&element->entry);
- list_del(&(((struct pcidev_info *)
- (element->sysdata))->pdi_list));
- kfree(element->sysdata);
- kfree(element);
- }
- return;
-}
-#endif
-
-/*
- * Ugly hack to get PCI setup until we have a proper ACPI namespace.
- */
-
-#define PCI_BUSES_TO_SCAN 256
-
-static int __init sn_io_early_init(void)
-{
- int i, j;
-#ifndef XEN
- struct pci_dev *pci_dev = NULL;
-#endif
-
- if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
- return 0;
-
- /*
-	 * prime sn_pci_provider[]. Individual provider init routines will
- * override their respective default entries.
- */
-
- for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
- sn_pci_provider[i] = &sn_pci_default_provider;
-
-#ifndef XEN
- pcibr_init_provider();
- tioca_init_provider();
- tioce_init_provider();
-#endif
-
- /*
- * This is needed to avoid bounce limit checks in the blk layer
- */
- ia64_max_iommu_merge_mask = ~PAGE_MASK;
-#ifndef XEN
- sn_fixup_ionodes();
-#endif
- sn_irq_lh_init();
- INIT_LIST_HEAD(&sn_sysdata_list);
-#ifndef XEN
- sn_init_cpei_timer();
-
-#ifdef CONFIG_PROC_FS
- register_sn_procfs();
-#endif
-#endif
- /* busses are not known yet ... */
- for (i = 0; i <= max_segment_number; i++)
- for (j = 0; j <= max_pcibus_number; j++)
- sn_pci_controller_fixup(i, j, NULL);
-
- /*
- * Generic Linux PCI Layer has created the pci_bus and pci_dev
-	 * structures - time for us to add our SN platform-specific
- * information.
- */
-
-#ifndef XEN
- while ((pci_dev =
- pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
- sn_pci_fixup_slot(pci_dev);
-#endif
-
- sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */
-
- return 0;
-}
-
-/*
- * hubdev_init_node() - Creates the HUB data structure and links it to its
- * own NODE-specific data area.
- */
-void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
-{
- struct hubdev_info *hubdev_info;
- int size;
-#ifndef XEN
- pg_data_t *pg;
-#else
- struct pglist_data *pg;
-#endif
-
- size = sizeof(struct hubdev_info);
-
- if (node >= num_online_nodes()) /* Headless/memless IO nodes */
- pg = NODE_DATA(0);
- else
- pg = NODE_DATA(node);
-
- hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
-
- npda->pdinfo = (void *)hubdev_info;
-}
-
-geoid_t
-cnodeid_get_geoid(cnodeid_t cnode)
-{
- struct hubdev_info *hubdev;
-
- hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
- return hubdev->hdi_geoid;
-}
-
-#ifndef XEN
-void sn_generate_path(struct pci_bus *pci_bus, char *address)
-{
- nasid_t nasid;
- cnodeid_t cnode;
- geoid_t geoid;
- moduleid_t moduleid;
- u16 bricktype;
-
- nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
- cnode = nasid_to_cnodeid(nasid);
- geoid = cnodeid_get_geoid(cnode);
- moduleid = geo_module(geoid);
-
- snprintf(address, 15, "module_%c%c%c%c%.2d",
- '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
- '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
- '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
- MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
-
- /* Tollhouse requires slot id to be displayed */
- bricktype = MODULE_GET_BTYPE(moduleid);
- if ((bricktype == L1_BRICKTYPE_191010) ||
- (bricktype == L1_BRICKTYPE_1932))
- snprintf(address, 15+8, "%s^%d", address, geo_slot(geoid));
-}
-#endif
-
-#ifdef XEN
-__initcall(sn_io_early_init);
-#else
-subsys_initcall(sn_io_early_init);
-#endif
-#ifndef XEN
-EXPORT_SYMBOL(sn_pci_fixup_slot);
-EXPORT_SYMBOL(sn_pci_unfixup_slot);
-EXPORT_SYMBOL(sn_pci_controller_fixup);
-EXPORT_SYMBOL(sn_bus_store_sysdata);
-EXPORT_SYMBOL(sn_bus_free_sysdata);
-EXPORT_SYMBOL(sn_generate_path);
-#endif
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/iomv.c b/xen/arch/ia64/linux-xen/sn/kernel/iomv.c
deleted file mode 100644
index 3e38cb05e0..0000000000
--- a/xen/arch/ia64/linux-xen/sn/kernel/iomv.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/module.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#ifndef XEN
-#include <asm/vga.h>
-#endif
-#include <asm/sn/nodepda.h>
-#include <asm/sn/simulator.h>
-#include <asm/sn/pda.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/shub_mmr.h>
-
-#define IS_LEGACY_VGA_IOPORT(p) \
- (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
-
-#ifdef XEN
-#define vga_console_iobase 0
-#endif
-
-/**
- * sn_io_addr - convert an in/out port to an i/o address
- * @port: port to convert
- *
- * Legacy in/out instructions are converted to ld/st instructions
- * on IA64. This routine will convert a port number into a valid
- * SN i/o address. Used by sn_in*() and sn_out*().
- */
-void *sn_io_addr(unsigned long port)
-{
- if (!IS_RUNNING_ON_SIMULATOR()) {
- if (IS_LEGACY_VGA_IOPORT(port))
- port += vga_console_iobase;
- /* On sn2, legacy I/O ports don't point at anything */
- if (port < (64 * 1024))
- return NULL;
- return ((void *)(port | __IA64_UNCACHED_OFFSET));
- } else {
- /* but the simulator uses them... */
- unsigned long addr;
-
- /*
- * word align port, but need more than 10 bits
- * for accessing registers in bedrock local block
- * (so we don't do port&0xfff)
- */
- addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12);
- if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7)
- addr |= port;
- return (void *)addr;
- }
-}
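-
-/*
- * An example of the hardware-path translation above (hypothetical port):
- * port 0x10000 lies beyond the 64K legacy range, so sn_io_addr() returns
- *
- *	(void *) (0x10000 | __IA64_UNCACHED_OFFSET)
- *
- * an uncached identity-mapped pointer suitable for ld/st access, while
- * any port below 64K yields NULL on real sn2 hardware.
- */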
-
-EXPORT_SYMBOL(sn_io_addr);
-
-/**
- * __sn_mmiowb - I/O space memory barrier
- *
- * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl
- * for details.
- *
- * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
- * See PV 871084 for details of the zero-value WAR.
- *
- */
-void __sn_mmiowb(void)
-{
- volatile unsigned long *adr = pda->pio_write_status_addr;
- unsigned long val = pda->pio_write_status_val;
-
- while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)
- cpu_relax();
-}
-
-EXPORT_SYMBOL(__sn_mmiowb);
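-
-/*
- * A usage sketch (the lock and register are hypothetical, not from this
- * file): __sn_mmiowb() keeps an MMIO write issued under a lock from being
- * reordered past the unlock as observed by a CPU on another node.
- */
-static inline void
-sn_locked_reg_write (spinlock_t *lock, volatile u32 *reg, u32 val)
-{
-	spin_lock(lock);
-	*reg = val;		/* posted PIO write */
-	__sn_mmiowb();		/* wait for the write to drain from the Shub */
-	spin_unlock(lock);
-}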
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/irq.c b/xen/arch/ia64/linux-xen/sn/kernel/irq.c
deleted file mode 100644
index eac3e96941..0000000000
--- a/xen/arch/ia64/linux-xen/sn/kernel/irq.c
+++ /dev/null
@@ -1,576 +0,0 @@
-/*
- * Platform dependent support for SGI SN
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-#include <linux/irq.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#ifdef XEN
-#include <linux/linux-pci.h>
-#include <asm/hw_irq.h>
-#endif
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/pcibr_provider.h>
-#include <asm/sn/pcibus_provider_defs.h>
-#ifndef XEN
-#include <asm/sn/pcidev.h>
-#endif
-#include <asm/sn/shub_mmr.h>
-#include <asm/sn/sn_sal.h>
-
-#ifdef XEN
-#define pci_dev_get(dev) do {} while(0)
-#define move_native_irq(foo) do {} while(0)
-#endif
-
-static void force_interrupt(int irq);
-#ifndef XEN
-static void register_intr_pda(struct sn_irq_info *sn_irq_info);
-static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
-#endif
-
-int sn_force_interrupt_flag = 1;
-extern int sn_ioif_inited;
-struct list_head **sn_irq_lh;
-static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
-
-u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
- struct sn_irq_info *sn_irq_info,
- int req_irq, nasid_t req_nasid,
- int req_slice)
-{
- struct ia64_sal_retval ret_stuff;
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
-
- SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
- (u64) SAL_INTR_ALLOC, (u64) local_nasid,
- (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
- (u64) req_nasid, (u64) req_slice);
-
- return ret_stuff.status;
-}
-
-void sn_intr_free(nasid_t local_nasid, int local_widget,
- struct sn_irq_info *sn_irq_info)
-{
- struct ia64_sal_retval ret_stuff;
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
-
- SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
- (u64) SAL_INTR_FREE, (u64) local_nasid,
- (u64) local_widget, (u64) sn_irq_info->irq_irq,
- (u64) sn_irq_info->irq_cookie, 0, 0);
-}
-
-#ifndef XEN
-static unsigned int sn_startup_irq(unsigned int irq)
-{
- return 0;
-}
-
-static void sn_shutdown_irq(unsigned int irq)
-{
-}
-
-static void sn_disable_irq(unsigned int irq)
-{
-}
-
-static void sn_enable_irq(unsigned int irq)
-{
-}
-#endif
-
-#ifdef XEN
-static void sn_ack_irq(struct irq_desc *desc)
-{
- unsigned int irq = desc->irq;
-#else
-static void sn_ack_irq(unsigned int irq)
-{
-#endif
- u64 event_occurred, mask;
-
- irq = irq & 0xff;
- event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
- mask = event_occurred & SH_ALL_INT_MASK;
- HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
- __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
-
- move_native_irq(irq);
-}
-
-#ifdef XEN
-static void sn_end_irq(struct irq_desc *desc)
-{
- unsigned int irq = desc->irq;
-#else
-static void sn_end_irq(unsigned int irq)
-{
-#endif
- int ivec;
- u64 event_occurred;
-
- ivec = irq & 0xff;
- if (ivec == SGI_UART_VECTOR) {
- event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
- /* If the UART bit is set here, we may have received an
- * interrupt from the UART that the driver missed. To
- * make sure, we IPI ourselves to force us to look again.
- */
- if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
- platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
- IA64_IPI_DM_INT, 0);
- }
- }
- __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
- if (sn_force_interrupt_flag)
- force_interrupt(irq);
-}
-
-#ifndef XEN
-static void sn_irq_info_free(struct rcu_head *head);
-
-struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
- nasid_t nasid, int slice)
-{
- int vector;
- int cpuphys;
- int64_t bridge;
- int local_widget, status;
- nasid_t local_nasid;
- struct sn_irq_info *new_irq_info;
- struct sn_pcibus_provider *pci_provider;
-
- new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
- if (new_irq_info == NULL)
- return NULL;
-
- memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
- bridge = (u64) new_irq_info->irq_bridge;
- if (!bridge) {
- kfree(new_irq_info);
- return NULL; /* irq is not a device interrupt */
- }
-
- local_nasid = NASID_GET(bridge);
-
- if (local_nasid & 1)
- local_widget = TIO_SWIN_WIDGETNUM(bridge);
- else
- local_widget = SWIN_WIDGETNUM(bridge);
-
- vector = sn_irq_info->irq_irq;
- /* Free the old PROM new_irq_info structure */
- sn_intr_free(local_nasid, local_widget, new_irq_info);
- /* Update kernels new_irq_info with new target info */
- unregister_intr_pda(new_irq_info);
-
- /* allocate a new PROM new_irq_info struct */
- status = sn_intr_alloc(local_nasid, local_widget,
- new_irq_info, vector,
- nasid, slice);
-
- /* SAL call failed */
- if (status) {
- kfree(new_irq_info);
- return NULL;
- }
-
- cpuphys = nasid_slice_to_cpuid(nasid, slice);
- new_irq_info->irq_cpuid = cpuphys;
- register_intr_pda(new_irq_info);
-
- pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
-
- /*
- * If this represents a line interrupt, target it. If it's
-	 * an MSI (irq_int_bit < 0), it's already targeted.
- */
- if (new_irq_info->irq_int_bit >= 0 &&
- pci_provider && pci_provider->target_interrupt)
- (pci_provider->target_interrupt)(new_irq_info);
-
- spin_lock(&sn_irq_info_lock);
-#ifdef XEN
- list_replace(&sn_irq_info->list, &new_irq_info->list);
-#else
- list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
-#endif
- spin_unlock(&sn_irq_info_lock);
-#ifndef XEN
- call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
-#endif
-
-#ifdef CONFIG_SMP
- set_irq_affinity_info((vector & 0xff), cpuphys, 0);
-#endif
-
- return new_irq_info;
-}
-
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
-{
- struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
- nasid_t nasid;
- int slice;
-
- nasid = cpuid_to_nasid(first_cpu(mask));
- slice = cpuid_to_slice(first_cpu(mask));
-
- list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
- sn_irq_lh[irq], list)
- (void)sn_retarget_vector(sn_irq_info, nasid, slice);
-}
-#endif
-
-static hw_irq_controller irq_type_sn = {
-#ifndef XEN
- .name = "SN hub",
- .startup = sn_startup_irq,
- .shutdown = sn_shutdown_irq,
- .enable = sn_enable_irq,
- .disable = sn_disable_irq,
-#else
- .typename = "SN hub",
- .startup = irq_startup_none,
- .shutdown = irq_shutdown_none,
- .enable = irq_enable_none,
- .disable = irq_disable_none,
-#endif
- .ack = sn_ack_irq,
- .end = sn_end_irq,
-#ifndef XEN
- .set_affinity = sn_set_affinity_irq
-#endif
-};
-
-unsigned int sn_local_vector_to_irq(u8 vector)
-{
- return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
-}
-
-void sn_irq_init(void)
-{
- int i;
- irq_desc_t *base_desc = irq_desc;
-
-#ifndef XEN
- ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
- ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
-#endif
-
- for (i = 0; i < NR_IRQS; i++) {
-#ifdef XEN
- if (base_desc[i].handler == &no_irq_type) {
- base_desc[i].handler = &irq_type_sn;
-#else
- if (base_desc[i].chip == &no_irq_type) {
- base_desc[i].chip = &irq_type_sn;
-#endif
- }
- }
-}
-
-static void register_intr_pda(struct sn_irq_info *sn_irq_info)
-{
- int irq = sn_irq_info->irq_irq;
- int cpu = sn_irq_info->irq_cpuid;
-
- if (pdacpu(cpu)->sn_last_irq < irq) {
- pdacpu(cpu)->sn_last_irq = irq;
- }
-
- if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
- pdacpu(cpu)->sn_first_irq = irq;
-}
-
-#ifndef XEN
-static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
-{
- int irq = sn_irq_info->irq_irq;
- int cpu = sn_irq_info->irq_cpuid;
- struct sn_irq_info *tmp_irq_info;
- int i, foundmatch;
-
-#ifndef XEN
- rcu_read_lock();
-#else
- spin_lock(&sn_irq_info_lock);
-#endif
- if (pdacpu(cpu)->sn_last_irq == irq) {
- foundmatch = 0;
- for (i = pdacpu(cpu)->sn_last_irq - 1;
- i && !foundmatch; i--) {
-#ifdef XEN
- list_for_each_entry(tmp_irq_info,
- sn_irq_lh[i],
- list) {
-#else
- list_for_each_entry_rcu(tmp_irq_info,
- sn_irq_lh[i],
- list) {
-#endif
- if (tmp_irq_info->irq_cpuid == cpu) {
- foundmatch = 1;
- break;
- }
- }
- }
- pdacpu(cpu)->sn_last_irq = i;
- }
-
- if (pdacpu(cpu)->sn_first_irq == irq) {
- foundmatch = 0;
- for (i = pdacpu(cpu)->sn_first_irq + 1;
- i < NR_IRQS && !foundmatch; i++) {
-#ifdef XEN
- list_for_each_entry(tmp_irq_info,
- sn_irq_lh[i],
- list) {
-#else
- list_for_each_entry_rcu(tmp_irq_info,
- sn_irq_lh[i],
- list) {
-#endif
- if (tmp_irq_info->irq_cpuid == cpu) {
- foundmatch = 1;
- break;
- }
- }
- }
- pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
- }
-#ifndef XEN
- rcu_read_unlock();
-#else
- spin_unlock(&sn_irq_info_lock);
-#endif
-}
-
-static void sn_irq_info_free(struct rcu_head *head)
-{
- struct sn_irq_info *sn_irq_info;
-
- sn_irq_info = container_of(head, struct sn_irq_info, rcu);
- kfree(sn_irq_info);
-}
-#endif
-
-#ifdef XEN
-void sn_irq_fixup(struct sn_pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
-#else
-void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
-#endif
-{
- nasid_t nasid = sn_irq_info->irq_nasid;
- int slice = sn_irq_info->irq_slice;
- int cpu = nasid_slice_to_cpuid(nasid, slice);
-
- pci_dev_get(pci_dev);
- sn_irq_info->irq_cpuid = cpu;
-#ifndef XEN
- sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
-#endif
-
- /* link it into the sn_irq[irq] list */
- spin_lock(&sn_irq_info_lock);
-#ifdef XEN
- list_add(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
-#else
- list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
-#endif
-#ifndef XEN
- reserve_irq_vector(sn_irq_info->irq_irq);
-#endif
- spin_unlock(&sn_irq_info_lock);
-
- register_intr_pda(sn_irq_info);
-}
-
-#ifdef XEN
-void sn_irq_unfixup(struct sn_pci_dev *pci_dev)
-#else
-void sn_irq_unfixup(struct pci_dev *pci_dev)
-#endif
-{
-#ifndef XEN
- struct sn_irq_info *sn_irq_info;
-
- /* Only cleanup IRQ stuff if this device has a host bus context */
- if (!SN_PCIDEV_BUSSOFT(pci_dev))
- return;
-
- sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
- if (!sn_irq_info)
- return;
- if (!sn_irq_info->irq_irq) {
- kfree(sn_irq_info);
- return;
- }
-
- unregister_intr_pda(sn_irq_info);
- spin_lock(&sn_irq_info_lock);
-#ifdef XEN
- list_del(&sn_irq_info->list);
-#else
- list_del_rcu(&sn_irq_info->list);
-#endif
- spin_unlock(&sn_irq_info_lock);
- if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
- free_irq_vector(sn_irq_info->irq_irq);
-#ifndef XEN
- call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
-#endif
- pci_dev_put(pci_dev);
-
-#endif
-}
-
-static inline void
-sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
-{
- struct sn_pcibus_provider *pci_provider;
-
- pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
- if (pci_provider && pci_provider->force_interrupt)
- (*pci_provider->force_interrupt)(sn_irq_info);
-}
-
-static void force_interrupt(int irq)
-{
- struct sn_irq_info *sn_irq_info;
-
-#ifndef XEN
- if (!sn_ioif_inited)
- return;
-#endif
-
-#ifdef XEN
- spin_lock(&sn_irq_info_lock);
-#else
- rcu_read_lock();
-#endif
-#ifdef XEN
- list_for_each_entry(sn_irq_info, sn_irq_lh[irq], list)
-#else
- list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
-#endif
- sn_call_force_intr_provider(sn_irq_info);
-
-#ifdef XEN
- spin_unlock(&sn_irq_info_lock);
-#else
- rcu_read_unlock();
-#endif
-}
-
-#ifndef XEN
-/*
- * Check for lost interrupts. If the PIC int_status reg. says that
- * an interrupt has been sent, but not handled, and the interrupt
- * is not pending in either the cpu irr regs or in the soft irr regs,
- * and the interrupt is not in service, then the interrupt may have
- * been lost. Force an interrupt on that pin. It is possible that
- * the interrupt is in flight, so we may generate a spurious interrupt,
- * but we should never miss a real lost interrupt.
- */
-static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
-{
- u64 regval;
- struct pcidev_info *pcidev_info;
- struct pcibus_info *pcibus_info;
-
- /*
- * Bridge types attached to TIO (anything but PIC) do not need this WAR
- * since they do not target Shub II interrupt registers. If that
-	 * ever changes, this check needs to accommodate it.
- */
- if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
- return;
-
- pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
- if (!pcidev_info)
- return;
-
- pcibus_info =
- (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
- pdi_pcibus_info;
- regval = pcireg_intr_status_get(pcibus_info);
-
- if (!ia64_get_irr(irq_to_vector(irq))) {
- if (!test_bit(irq, pda->sn_in_service_ivecs)) {
- regval &= 0xff;
- if (sn_irq_info->irq_int_bit & regval &
- sn_irq_info->irq_last_intr) {
- regval &= ~(sn_irq_info->irq_int_bit & regval);
- sn_call_force_intr_provider(sn_irq_info);
- }
- }
- }
- sn_irq_info->irq_last_intr = regval;
-}
-#endif
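-
-/*
- * The "lost interrupt" predicate sn_check_intr() applies above, spelled
- * out (a sketch; names as in the code):
- *
- *	lost =  !ia64_get_irr(vector)			 // not pending in the CPU
- *	     && !test_bit(irq, sn_in_service_ivecs)	 // not being serviced
- *	     && (irq_int_bit & status & irq_last_intr);	 // PIC says it was sent
- *
- * Only when all three hold is the interrupt re-forced, so an in-flight
- * interrupt can at worst cause one spurious extra delivery.
- */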
-
-void sn_lb_int_war_check(void)
-{
-#ifndef XEN
- struct sn_irq_info *sn_irq_info;
- int i;
-
-#ifdef XEN
- if (pda->sn_first_irq == 0)
-#else
- if (!sn_ioif_inited || pda->sn_first_irq == 0)
-#endif
- return;
-
-#ifdef XEN
- spin_lock(&sn_irq_info_lock);
-#else
- rcu_read_lock();
-#endif
- for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
-#ifdef XEN
- list_for_each_entry(sn_irq_info, sn_irq_lh[i], list) {
-#else
- list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
-#endif
- sn_check_intr(i, sn_irq_info);
- }
- }
-#ifdef XEN
- spin_unlock(&sn_irq_info_lock);
-#else
- rcu_read_unlock();
-#endif
-#endif
-}
-
-void __init sn_irq_lh_init(void)
-{
- int i;
-
- sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
- if (!sn_irq_lh)
- panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
-
- for (i = 0; i < NR_IRQS; i++) {
- sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!sn_irq_lh[i])
- panic("SN PCI INIT: Failed IRQ memory allocation\n");
-
- INIT_LIST_HEAD(sn_irq_lh[i]);
- }
-}
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/setup.c b/xen/arch/ia64/linux-xen/sn/kernel/setup.c
deleted file mode 100644
index a62cf54de7..0000000000
--- a/xen/arch/ia64/linux-xen/sn/kernel/setup.c
+++ /dev/null
@@ -1,803 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#ifndef XEN
-#include <linux/kdev_t.h>
-#endif
-#include <linux/string.h>
-#ifndef XEN
-#include <linux/screen_info.h>
-#endif
-#include <linux/console.h>
-#include <linux/timex.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/serial.h>
-#include <linux/irq.h>
-#include <linux/bootmem.h>
-#include <linux/mmzone.h>
-#include <linux/interrupt.h>
-#include <linux/acpi.h>
-#include <linux/compiler.h>
-#include <linux/sched.h>
-#ifndef XEN
-#include <linux/root_dev.h>
-#endif
-#include <linux/nodemask.h>
-#include <linux/pm.h>
-#include <linux/efi.h>
-
-#include <asm/io.h>
-#include <asm/sal.h>
-#include <asm/machvec.h>
-#include <asm/system.h>
-#include <asm/processor.h>
-#ifndef XEN
-#include <asm/vga.h>
-#endif
-#include <asm/sn/arch.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/pda.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/simulator.h>
-#include <asm/sn/leds.h>
-#ifndef XEN
-#include <asm/sn/bte.h>
-#endif
-#include <asm/sn/shub_mmr.h>
-#ifndef XEN
-#include <asm/sn/clksupport.h>
-#endif
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/geo.h>
-#include <asm/sn/sn_feature_sets.h>
-#ifndef XEN
-#include "xtalk/xwidgetdev.h"
-#include "xtalk/hubdev.h"
-#else
-#include "asm/sn/xwidgetdev.h"
-#include "asm/sn/hubdev.h"
-#endif
-#include <asm/sn/klconfig.h>
-#ifdef XEN
-#include <asm/sn/shubio.h>
-#endif
-
-
-DEFINE_PER_CPU(struct pda_s, pda_percpu);
-
-#define MAX_PHYS_MEMORY (1UL << IA64_MAX_PHYS_BITS) /* Max physical address supported */
-
-extern void bte_init_node(nodepda_t *, cnodeid_t);
-
-extern void sn_timer_init(void);
-extern unsigned long last_time_offset;
-extern void (*ia64_mark_idle) (int);
-extern void snidle(int);
-extern unsigned long long (*ia64_printk_clock)(void);
-
-unsigned long sn_rtc_cycles_per_second;
-EXPORT_SYMBOL(sn_rtc_cycles_per_second);
-
-DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
-EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
-
-DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
-EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
-
-DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
-EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
-
-char sn_system_serial_number_string[128];
-EXPORT_SYMBOL(sn_system_serial_number_string);
-u64 sn_partition_serial_number;
-EXPORT_SYMBOL(sn_partition_serial_number);
-u8 sn_partition_id;
-EXPORT_SYMBOL(sn_partition_id);
-u8 sn_system_size;
-EXPORT_SYMBOL(sn_system_size);
-u8 sn_sharing_domain_size;
-EXPORT_SYMBOL(sn_sharing_domain_size);
-u8 sn_coherency_id;
-EXPORT_SYMBOL(sn_coherency_id);
-u8 sn_region_size;
-EXPORT_SYMBOL(sn_region_size);
-int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
-
-short physical_node_map[MAX_NUMALINK_NODES];
-static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS];
-
-EXPORT_SYMBOL(physical_node_map);
-
-int num_cnodes;
-
-static void sn_init_pdas(char **);
-static void build_cnode_tables(void);
-
-static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
-
-#ifndef XEN
-/*
- * The format of "screen_info" is strange, and due to early i386-setup
- * code. This is just enough to make the console code think we're on a
- * VGA color display.
- */
-struct screen_info sn_screen_info = {
- .orig_x = 0,
- .orig_y = 0,
- .orig_video_mode = 3,
- .orig_video_cols = 80,
- .orig_video_ega_bx = 3,
- .orig_video_lines = 25,
- .orig_video_isVGA = 1,
- .orig_video_points = 16
-};
-#endif
-
-/*
- * This routine can only be used during init, since
- * smp_boot_data is an init data structure.
- * We have to use smp_boot_data.cpu_phys_id to find
- * the physical id of the processor because the normal
- * cpu_physical_id() relies on data structures that
- * may not be initialized yet.
- */
-
-static int __init pxm_to_nasid(int pxm)
-{
- int i;
- int nid;
-
- nid = pxm_to_node(pxm);
- for (i = 0; i < num_node_memblks; i++) {
- if (node_memblk[i].nid == nid) {
- return NASID_GET(node_memblk[i].start_paddr);
- }
- }
- return -1;
-}
-
-/**
- * early_sn_setup - early setup routine for SN platforms
- *
- * Sets up an initial console to aid debugging. Intended primarily
- * for bringup. See start_kernel() in init/main.c.
- */
-
-void __init early_sn_setup(void)
-{
- efi_system_table_t *efi_systab;
- efi_config_table_t *config_tables;
- struct ia64_sal_systab *sal_systab;
- struct ia64_sal_desc_entry_point *ep;
- char *p;
- int i, j;
-
- /*
-	 * Parse enough of the SAL tables to locate the SAL entry point. Since console
-	 * IO on SN2 is done via SAL calls, early_printk won't work without this.
-	 *
-	 * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
-	 * Any changes to those files may have to be made here as well.
- */
- efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
- config_tables = __va(efi_systab->tables);
- for (i = 0; i < efi_systab->nr_tables; i++) {
- if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
- 0) {
- sal_systab = __va(config_tables[i].table);
- p = (char *)(sal_systab + 1);
- for (j = 0; j < sal_systab->entry_count; j++) {
- if (*p == SAL_DESC_ENTRY_POINT) {
- ep = (struct ia64_sal_desc_entry_point
- *)p;
- ia64_sal_handler_init(__va
- (ep->sal_proc),
- __va(ep->gp));
- return;
- }
- p += SAL_DESC_SIZE(*p);
- }
- }
- }
- /* Uh-oh, SAL not available?? */
- printk(KERN_ERR "failed to find SAL entry point\n");
-}
-
-extern int platform_intr_list[];
-static int __initdata shub_1_1_found;
-
-/*
- * sn_check_for_wars
- *
- * Set the flag for enabling SHUB-specific WARs (workarounds).
- */
-
-static inline int __init is_shub_1_1(int nasid)
-{
- unsigned long id;
- int rev;
-
- if (is_shub2())
- return 0;
- id = REMOTE_HUB_L(nasid, SH1_SHUB_ID);
- rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT;
- return rev <= 2;
-}
-
-static void __init sn_check_for_wars(void)
-{
- int cnode;
-
- if (is_shub2()) {
- /* none yet */
- } else {
- for_each_online_node(cnode) {
- if (is_shub_1_1(cnodeid_to_nasid(cnode)))
- shub_1_1_found = 1;
- }
- }
-}
-
-#ifndef XEN
-/*
- * Scan the EFI PCDP table (if it exists) for an acceptable VGA console
- * output device. If one exists, pick it and set sn_legacy_{io,mem} to
- * reflect the bus offsets needed to address it.
- *
- * Since PCDP is not supported in the 2.4 kernel (or at least
- * the one lbs is based on), just declare the needed structs here.
- *
- * Reference spec http://www.dig64.org/specifications/DIG64_PCDPv20.pdf
- *
- * Returns 0 if no acceptable vga is found, !0 otherwise.
- *
- * Note: This stuff is duped here because Altix requires the PCDP to
- * locate a usable VGA device due to lack of proper ACPI support. Structures
- * could be used from drivers/firmware/pcdp.h, but it was decided that moving
- * this file to a more public location just for Altix use was undesirable.
- */
-
-struct hcdp_uart_desc {
- u8 pad[45];
-};
-
-struct pcdp {
- u8 signature[4]; /* should be 'HCDP' */
- u32 length;
- u8 rev; /* should be >=3 for pcdp, <3 for hcdp */
- u8 sum;
- u8 oem_id[6];
- u64 oem_tableid;
- u32 oem_rev;
- u32 creator_id;
- u32 creator_rev;
- u32 num_type0;
- struct hcdp_uart_desc uart[0]; /* num_type0 of these */
- /* pcdp descriptors follow */
-} __attribute__((packed));
-
-struct pcdp_device_desc {
- u8 type;
- u8 primary;
- u16 length;
- u16 index;
- /* interconnect specific structure follows */
- /* device specific structure follows that */
-} __attribute__((packed));
-
-struct pcdp_interface_pci {
- u8 type; /* 1 == pci */
- u8 reserved;
- u16 length;
- u8 segment;
- u8 bus;
- u8 dev;
- u8 fun;
- u16 devid;
- u16 vendid;
- u32 acpi_interrupt;
- u64 mmio_tra;
- u64 ioport_tra;
- u8 flags;
- u8 translation;
-} __attribute__((packed));
-
-struct pcdp_vga_device {
- u8 num_eas_desc;
- /* ACPI Extended Address Space Desc follows */
-} __attribute__((packed));
-
-/* from pcdp_device_desc.primary */
-#define PCDP_PRIMARY_CONSOLE 0x01
-
-/* from pcdp_device_desc.type */
-#define PCDP_CONSOLE_INOUT 0x0
-#define PCDP_CONSOLE_DEBUG 0x1
-#define PCDP_CONSOLE_OUT 0x2
-#define PCDP_CONSOLE_IN 0x3
-#define PCDP_CONSOLE_TYPE_VGA 0x8
-
-#define PCDP_CONSOLE_VGA (PCDP_CONSOLE_TYPE_VGA | PCDP_CONSOLE_OUT)
-
-/* from pcdp_interface_pci.type */
-#define PCDP_IF_PCI 1
-
-/* from pcdp_interface_pci.translation */
-#define PCDP_PCI_TRANS_IOPORT 0x02
-#define PCDP_PCI_TRANS_MMIO 0x01
-
-#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
-static void
-sn_scan_pcdp(void)
-{
- u8 *bp;
- struct pcdp *pcdp;
- struct pcdp_device_desc device;
- struct pcdp_interface_pci if_pci;
- extern struct efi efi;
-
- if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
- return; /* no hcdp/pcdp table */
-
- pcdp = __va(efi.hcdp);
-
- if (pcdp->rev < 3)
- return; /* only support PCDP (rev >= 3) */
-
- for (bp = (u8 *)&pcdp->uart[pcdp->num_type0];
- bp < (u8 *)pcdp + pcdp->length;
- bp += device.length) {
- memcpy(&device, bp, sizeof(device));
- if (! (device.primary & PCDP_PRIMARY_CONSOLE))
- continue; /* not primary console */
-
- if (device.type != PCDP_CONSOLE_VGA)
- continue; /* not VGA descriptor */
-
- memcpy(&if_pci, bp+sizeof(device), sizeof(if_pci));
- if (if_pci.type != PCDP_IF_PCI)
- continue; /* not PCI interconnect */
-
- if (if_pci.translation & PCDP_PCI_TRANS_IOPORT)
- vga_console_iobase =
- if_pci.ioport_tra | __IA64_UNCACHED_OFFSET;
-
- if (if_pci.translation & PCDP_PCI_TRANS_MMIO)
- vga_console_membase =
- if_pci.mmio_tra | __IA64_UNCACHED_OFFSET;
-
- break; /* once we find the primary, we're done */
- }
-}
-#endif
-
-static unsigned long sn2_rtc_initial;
-
-static unsigned long long ia64_sn2_printk_clock(void)
-{
- unsigned long rtc_now = rtc_time();
-
- return (rtc_now - sn2_rtc_initial) *
- (1000000000 / sn_rtc_cycles_per_second);
-}
-#endif
-
-/**
- * sn_setup - SN platform setup routine
- * @cmdline_p: kernel command line
- *
- * Handles platform setup for SN machines. This includes determining
- * the RTC frequency (via a SAL call), initializing secondary CPUs, and
- * setting up per-node data areas. The console is also initialized here.
- */
-#ifdef XEN
-void __cpuinit sn_cpu_init(void);
-#endif
-
-void __init sn_setup(char **cmdline_p)
-{
-#ifndef XEN
- long status, ticks_per_sec, drift;
-#else
- unsigned long status, ticks_per_sec, drift;
-#endif
- u32 version = sn_sal_rev();
-#ifndef XEN
- extern void sn_cpu_init(void);
-
- sn2_rtc_initial = rtc_time();
- ia64_sn_plat_set_error_handling_features(); // obsolete
- ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
- ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
-
-
-#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
- /*
- * Handle SN vga console.
- *
- * SN systems do not have enough ACPI table information
- * being passed from prom to identify VGA adapters and the legacy
- * addresses to access them. Until that is done, SN systems rely
- * on the PCDP table to identify the primary VGA console if one
- * exists.
- *
- * However, kernel PCDP support is optional, and even if it is built
- * into the kernel, it will not be used if the boot cmdline contains
- * console= directives.
- *
- * So, to work around this mess, we duplicate some of the PCDP code
- * here so that the primary VGA console (as defined by PCDP) will
- * work on SN systems even if a different console (e.g. serial) is
- * selected on the boot line (or CONFIG_EFI_PCDP is off).
- */
-
- if (! vga_console_membase)
- sn_scan_pcdp();
-
- if (vga_console_membase) {
- /* usable vga ... make tty0 the preferred default console */
- if (!strstr(*cmdline_p, "console="))
- add_preferred_console("tty", 0, NULL);
- } else {
- printk(KERN_DEBUG "SGI: Disabling VGA console\n");
- if (!strstr(*cmdline_p, "console="))
- add_preferred_console("ttySG", 0, NULL);
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#else
- conswitchp = NULL;
-#endif /* CONFIG_DUMMY_CONSOLE */
- }
-#endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
-
- MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
-#endif
-
- /*
- * Build the tables for managing cnodes.
- */
- build_cnode_tables();
-
- status =
- ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
- &drift);
- if (status != 0 || ticks_per_sec < 100000) {
- printk(KERN_WARNING
- "unable to determine platform RTC clock frequency, guessing.\n");
-		/* the PROM reports a wrong clock frequency, so guess */
- sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
- } else
- sn_rtc_cycles_per_second = ticks_per_sec;
-#ifndef XEN
-
- platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
-
- ia64_printk_clock = ia64_sn2_printk_clock;
-#endif
-
- printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
-
- /*
- * we set the default root device to /dev/hda
- * to make simulation easy
- */
-#ifndef XEN
- ROOT_DEV = Root_HDA1;
-#endif
-
- /*
- * Create the PDAs and NODEPDAs for all the cpus.
- */
- sn_init_pdas(cmdline_p);
-
-#ifndef XEN
- ia64_mark_idle = &snidle;
-#endif
-
- /*
- * For the bootcpu, we do this here. All other cpus will make the
- * call as part of cpu_init in slave cpu initialization.
- */
- sn_cpu_init();
-
-#ifndef XEN
-#ifdef CONFIG_SMP
- init_smp_config();
-#endif
- screen_info = sn_screen_info;
-
- sn_timer_init();
-
- /*
- * set pm_power_off to a SAL call to allow
- * sn machines to power off. The SAL call can be replaced
- * by an ACPI interface call when ACPI is fully implemented
- * for sn.
- */
- pm_power_off = ia64_sn_power_down;
- current->thread.flags |= IA64_THREAD_MIGRATION;
-#endif
-}
-
-/**
- * sn_init_pdas - setup node data areas
- *
- * One time setup for Node Data Area. Called by sn_setup().
- */
-static void __init sn_init_pdas(char **cmdline_p)
-{
- cnodeid_t cnode;
-
- /*
-	 * Allocate & initialize the nodepda for each node.
- */
- for_each_online_node(cnode) {
- nodepdaindr[cnode] =
- alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
- memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
- memset(nodepdaindr[cnode]->phys_cpuid, -1,
- sizeof(nodepdaindr[cnode]->phys_cpuid));
- spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
- }
-
- /*
- * Allocate & initialize nodepda for TIOs. For now, put them on node 0.
- */
- for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
- nodepdaindr[cnode] =
- alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
- memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
- }
-
- /*
- * Now copy the array of nodepda pointers to each nodepda.
- */
- for (cnode = 0; cnode < num_cnodes; cnode++)
- memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
- sizeof(nodepdaindr));
-
-#ifndef XEN
- /*
- * Set up IO related platform-dependent nodepda fields.
- * The following routine actually sets up the hubinfo struct
- * in nodepda.
- */
- for_each_online_node(cnode) {
- bte_init_node(nodepdaindr[cnode], cnode);
- }
-
- /*
- * Initialize the per node hubdev. This includes IO Nodes and
- * headless/memless nodes.
- */
- for (cnode = 0; cnode < num_cnodes; cnode++) {
- hubdev_init_node(nodepdaindr[cnode], cnode);
- }
-#endif
-}
-
-/**
- * sn_cpu_init - initialize per-cpu data areas
- * @cpuid: cpuid of the caller
- *
- * Called during cpu initialization on each cpu as it starts.
- * Currently, initializes the per-cpu data area for SNIA.
- * Also sets up a few fields in the nodepda. Also known as
- * platform_cpu_init() by the ia64 machvec code.
- */
-void __cpuinit sn_cpu_init(void)
-{
- int cpuid;
- int cpuphyid;
- int nasid;
- int subnode;
- int slice;
- int cnode;
- int i;
- static int wars_have_been_checked;
-
- cpuid = smp_processor_id();
-#ifndef XEN
- if (cpuid == 0 && IS_MEDUSA()) {
- if (ia64_sn_is_fake_prom())
- sn_prom_type = 2;
- else
- sn_prom_type = 1;
- printk(KERN_INFO "Running on medusa with %s PROM\n",
- (sn_prom_type == 1) ? "real" : "fake");
- }
-#endif
-
- memset(pda, 0, sizeof(pda));
- if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
- &sn_hub_info->nasid_bitmask,
- &sn_hub_info->nasid_shift,
- &sn_system_size, &sn_sharing_domain_size,
- &sn_partition_id, &sn_coherency_id,
- &sn_region_size))
- BUG();
- sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
-
- /*
- * Don't check status. The SAL call is not supported on all PROMs
- * but a failure is harmless.
- */
- (void) ia64_sn_set_cpu_number(cpuid);
-
- /*
- * The boot cpu makes this call again after platform initialization is
- * complete.
- */
- if (nodepdaindr[0] == NULL)
- return;
-
- for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
- if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
- break;
-
- cpuphyid = get_sapicid();
-
- if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
- BUG();
-
- for (i=0; i < MAX_NUMNODES; i++) {
- if (nodepdaindr[i]) {
- nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
- nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
- nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
- }
- }
-
- cnode = nasid_to_cnodeid(nasid);
-
- sn_nodepda = nodepdaindr[cnode];
-
- pda->led_address =
- (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
- pda->led_state = LED_ALWAYS_SET;
- pda->hb_count = HZ / 2;
- pda->hb_state = 0;
- pda->idle_flag = 0;
-
- if (cpuid != 0) {
- /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
- memcpy(sn_cnodeid_to_nasid,
- (&per_cpu(__sn_cnodeid_to_nasid, 0)),
- sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
- }
-
- /*
- * Check for WARs.
- * Only needs to be done once, on BSP.
- * Has to be done after loop above, because it uses this cpu's
- * sn_cnodeid_to_nasid table which was just initialized if this
- * isn't cpu 0.
- * Has to be done before assignment below.
- */
- if (!wars_have_been_checked) {
- sn_check_for_wars();
- wars_have_been_checked = 1;
- }
- sn_hub_info->shub_1_1_found = shub_1_1_found;
-
- /*
- * Set up addresses of PIO/MEM write status registers.
- */
- {
- u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
- u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
- SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
- u64 *pio;
- pio = is_shub1() ? pio1 : pio2;
- pda->pio_write_status_addr =
- (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
- pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
- }
-
-#ifndef XEN /* local_node_data is not allocated .... yet */
- /*
- * WAR addresses for SHUB 1.x.
- */
- if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
- int buddy_nasid;
- buddy_nasid =
- cnodeid_to_nasid(numa_node_id() ==
- num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
- pda->pio_shub_war_cam_addr =
- (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
- SH1_PI_CAM_CONTROL);
- }
-#endif
-}
-
-/*
- * Build tables for converting between NASIDs and cnodes.
- */
-static inline int __init board_needs_cnode(int type)
-{
- return (type == KLTYPE_SNIA || type == KLTYPE_TIO);
-}
-
-void __init build_cnode_tables(void)
-{
- int nasid;
- int node;
- lboard_t *brd;
-
- memset(physical_node_map, -1, sizeof(physical_node_map));
- memset(sn_cnodeid_to_nasid, -1,
- sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
-
- /*
- * First populate the tables with C/M bricks. This ensures that
- * cnode == node for all C & M bricks.
- */
- for_each_online_node(node) {
- nasid = pxm_to_nasid(node_to_pxm(node));
- sn_cnodeid_to_nasid[node] = nasid;
- physical_node_map[nasid] = node;
- }
-
- /*
-	 * num_cnodes is the total number of C/M/TIO bricks. Because of the 256-node
- * limit on the number of nodes, we can't use the generic node numbers
- * for this. Note that num_cnodes is incremented below as TIOs or
- * headless/memoryless nodes are discovered.
- */
- num_cnodes = num_online_nodes();
-
- /* fakeprom does not support klgraph */
- if (IS_RUNNING_ON_FAKE_PROM())
- return;
-
- /* Find TIOs & headless/memoryless nodes and add them to the tables */
- for_each_online_node(node) {
- kl_config_hdr_t *klgraph_header;
- nasid = cnodeid_to_nasid(node);
- klgraph_header = ia64_sn_get_klconfig_addr(nasid);
- if (klgraph_header == NULL)
- BUG();
- brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
- while (brd) {
- if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
- sn_cnodeid_to_nasid[num_cnodes] = brd->brd_nasid;
- physical_node_map[brd->brd_nasid] = num_cnodes++;
- }
- brd = find_lboard_next(brd);
- }
- }
-}
-
-int
-nasid_slice_to_cpuid(int nasid, int slice)
-{
- long cpu;
-
- for (cpu = 0; cpu < NR_CPUS; cpu++)
- if (cpuid_to_nasid(cpu) == nasid &&
- cpuid_to_slice(cpu) == slice)
- return cpu;
-
- return -1;
-}
-
-int sn_prom_feature_available(int id)
-{
- if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS)
- return 0;
- return test_bit(id, sn_prom_features);
-}
-EXPORT_SYMBOL(sn_prom_feature_available);
-
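For reference, build_cnode_tables() above keeps sn_cnodeid_to_nasid[] and physical_node_map[] as mutual inverses: the first maps a compact node id to its NASID, the second maps a NASID back to its compact node id, with -1 marking unused slots. Below is a minimal userland sketch of that invariant; the array sizes and the add_brick() helper are illustrative stand-ins, not the SN2 definitions.

    #include <stdio.h>
    #include <string.h>

    #define DEMO_COMPACT_NODES   8   /* illustrative, not MAX_COMPACT_NODES */
    #define DEMO_NUMALINK_NODES 32   /* illustrative, not MAX_NUMALINK_NODES */

    static short demo_cnodeid_to_nasid[DEMO_COMPACT_NODES];
    static short demo_physical_node_map[DEMO_NUMALINK_NODES];
    static int demo_num_cnodes;

    /* Mirrors the discovery loops: skip NASIDs that are already mapped. */
    static void add_brick(short nasid)
    {
        if (demo_physical_node_map[nasid] >= 0)
            return;
        demo_cnodeid_to_nasid[demo_num_cnodes] = nasid;
        demo_physical_node_map[nasid] = demo_num_cnodes++;
    }

    int main(void)
    {
        int i;

        /* byte-wise -1 fill, exactly as the kernel code's memset() calls */
        memset(demo_cnodeid_to_nasid, -1, sizeof(demo_cnodeid_to_nasid));
        memset(demo_physical_node_map, -1, sizeof(demo_physical_node_map));

        add_brick(0);   /* C/M bricks first, so cnode == node for them */
        add_brick(4);
        add_brick(6);   /* e.g. a TIO discovered later in the klgraph */

        for (i = 0; i < demo_num_cnodes; i++)
            printf("cnode %d -> nasid %d -> cnode %d\n", i,
                   demo_cnodeid_to_nasid[i],
                   demo_physical_node_map[demo_cnodeid_to_nasid[i]]);
        return 0;
    }

Each output line shows a cnode mapping to a NASID and back to itself, which is the property the rest of the SN code relies on.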
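sn_scan_pcdp() above also shows a common firmware-table idiom: it walks variable-length device descriptors by advancing a byte pointer by each record's own length field, and memcpy()s each header into a local struct so unaligned table layouts are read safely. A self-contained sketch of the same traversal follows, with a simplified, hypothetical record layout; the 2-byte length decode assumes a little-endian host, as ia64 is.

    #include <stdio.h>
    #include <string.h>

    /* hypothetical packed record: a type byte plus a total-length field */
    struct rec {
        unsigned char  type;
        unsigned short length;   /* length of the whole record, in bytes */
    } __attribute__((packed));

    int main(void)
    {
        /* two packed records: type 1 of length 5, type 2 of length 4 */
        unsigned char table[] = { 1, 5, 0, 0xaa, 0xbb,
                                  2, 4, 0, 0xcc };
        unsigned char *bp = table;
        struct rec r;

        while (bp < table + sizeof(table)) {
            memcpy(&r, bp, sizeof(r));    /* unaligned-safe header copy */
            printf("record type %u, length %u\n",
                   (unsigned)r.type, (unsigned)r.length);
            bp += r.length;               /* step by the record's own size */
        }
        return 0;
    }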
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
deleted file mode 100644
index f79af88394..0000000000
--- a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- * SN2 Platform specific SMP Support
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mmzone.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/nodemask.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-#include <asm/processor.h>
-#include <asm/irq.h>
-#include <asm/sal.h>
-#include <asm/system.h>
-#include <asm/delay.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/tlb.h>
-#include <asm/numa.h>
-#include <asm/hw_irq.h>
-#include <asm/current.h>
-#ifdef XEN
-#include <asm/sn/arch.h>
-#endif
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/shub_mmr.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/rw_mmr.h>
-
-DEFINE_PER_CPU(struct ptc_stats, ptcstats);
-DECLARE_PER_CPU(struct ptc_stats, ptcstats);
-
-static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
-
-extern unsigned long
-sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
- volatile unsigned long *, unsigned long,
- volatile unsigned long *, unsigned long);
-void
-sn2_ptc_deadlock_recovery(short *, short, short, int,
- volatile unsigned long *, unsigned long,
- volatile unsigned long *, unsigned long);
-
-/*
- * Note: some of the following is captured here to make debugging easier
- * (the macros make more sense if you see the debug patch - not posted)
- */
-#define sn2_ptctest 0
-#define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0)
-#define max_active_pio(sh1) ((sh1) ? 32 : 7)
-#define reset_max_active_on_deadlock() 1
-#ifndef XEN
-#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
-#else
-#define PTC_LOCK(sh1) &sn2_global_ptc_lock
-#endif
-
-struct ptc_stats {
- unsigned long ptc_l;
- unsigned long change_rid;
- unsigned long shub_ptc_flushes;
- unsigned long nodes_flushed;
- unsigned long deadlocks;
- unsigned long deadlocks2;
- unsigned long lock_itc_clocks;
- unsigned long shub_itc_clocks;
- unsigned long shub_itc_clocks_max;
- unsigned long shub_ptc_flushes_not_my_mm;
-};
-
-#define sn2_ptctest 0
-
-static inline unsigned long wait_piowc(void)
-{
- volatile unsigned long *piows;
- unsigned long zeroval, ws;
-
- piows = pda->pio_write_status_addr;
- zeroval = pda->pio_write_status_val;
- do {
- cpu_relax();
- } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
- return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
-}
-
-#ifndef XEN /* No idea if Xen will ever support this */
-/**
- * sn_migrate - SN-specific task migration actions
- * @task: Task being migrated to new CPU
- *
- * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
- * Context switching user threads which have memory-mapped MMIO may cause
- * PIOs to issue from separate CPUs, thus the PIO writes must be drained
- * from the previous CPU's Shub before execution resumes on the new CPU.
- */
-void sn_migrate(struct task_struct *task)
-{
- pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
- volatile unsigned long *adr = last_pda->pio_write_status_addr;
- unsigned long val = last_pda->pio_write_status_val;
-
- /* Drain PIO writes from old CPU's Shub */
- while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
- != val))
- cpu_relax();
-}
-
-void sn_tlb_migrate_finish(struct mm_struct *mm)
-{
-	/* flush_tlb_mm is inefficient if the mm has more than 1 user */
-#ifndef XEN
- if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
-#else
- if (mm == &current->arch.mm && mm && atomic_read(&mm->mm_users) == 1)
-#endif
- flush_tlb_mm(mm);
-}
-#endif
-
-/**
- * sn2_global_tlb_purge - globally purge translation cache of virtual address range
- * @mm: mm_struct containing virtual address range
- * @start: start of virtual address range
- * @end: end of virtual address range
- * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
- *
- * Purges the translation caches of all processors of the given virtual address
- * range.
- *
- * Note:
- * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
- * - cpu_vm_mask is converted into a nodemask of the nodes containing the
- * cpus in cpu_vm_mask.
- * - if only one bit is set in cpu_vm_mask & it is the current cpu & the
- * process is purging its own virtual address range, then only the
- * local TLB needs to be flushed. This flushing can be done using
- * ptc.l. This is the common case & avoids the global spinlock.
- * - if multiple cpus have loaded the context, then flushing has to be
- * done with ptc.g/MMRs under protection of the global ptc_lock.
- */
-
-#ifdef XEN /* Xen is soooooooo stupid! */
-// static cpumask_t mask_all = CPU_MASK_ALL;
-#endif
-
-#ifdef XEN
-static DEFINE_SPINLOCK(sn2_ptcg_lock);
-
-struct sn_flush_struct {
- unsigned long start;
- unsigned long end;
- unsigned long nbits;
-};
-
-static void sn_flush_ptcga_cpu(void *ptr)
-{
- struct sn_flush_struct *sn_flush = ptr;
- unsigned long start, end, nbits;
-
- start = sn_flush->start;
- end = sn_flush->end;
- nbits = sn_flush->nbits;
-
- /*
- * Contention me harder!!!
- */
- /* HW requires global serialization of ptc.ga. */
- spin_lock(&sn2_ptcg_lock);
- {
- do {
- /*
- * Flush ALAT entries also.
- */
- ia64_ptcga(start, (nbits<<2));
- ia64_srlz_i();
- start += (1UL << nbits);
- } while (start < end);
- }
- spin_unlock(&sn2_ptcg_lock);
-}
-
-void
-sn2_global_tlb_purge(unsigned long start,
- unsigned long end, unsigned long nbits)
-{
- nodemask_t nodes_flushed;
- cpumask_t selected_cpus;
- int cpu, cnode, i;
- static DEFINE_SPINLOCK(sn2_ptcg_lock2);
-
- nodes_clear(nodes_flushed);
- cpumask_clear(&selected_cpus);
-
- spin_lock(&sn2_ptcg_lock2);
- node_set(cpu_to_node(smp_processor_id()), nodes_flushed);
- i = 0;
- for_each_possible_cpu(cpu) {
- cnode = cpu_to_node(cpu);
- if (!node_isset(cnode, nodes_flushed)) {
- cpumask_set_cpu(cpu, &selected_cpus);
- i++;
- }
- node_set(cnode, nodes_flushed);
- }
-
- /* HW requires global serialization of ptc.ga. */
- spin_lock(&sn2_ptcg_lock);
- {
- do {
- /*
- * Flush ALAT entries also.
- */
- ia64_ptcga(start, (nbits<<2));
- ia64_srlz_i();
- start += (1UL << nbits);
- } while (start < end);
- }
- spin_unlock(&sn2_ptcg_lock);
-
- if (i) {
- struct sn_flush_struct flush_data;
- flush_data.start = start;
- flush_data.end = end;
- flush_data.nbits = nbits;
- on_selected_cpus(&selected_cpus, sn_flush_ptcga_cpu,
- &flush_data, 1);
- }
- spin_unlock(&sn2_ptcg_lock2);
-}
-#else
-void
-sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
- unsigned long end, unsigned long nbits)
-{
- int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
- int mymm = (mm == current->active_mm && mm == current->mm);
- int use_cpu_ptcga;
- volatile unsigned long *ptc0, *ptc1;
- unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
- short nasids[MAX_NUMNODES], nix;
- nodemask_t nodes_flushed;
- int active, max_active, deadlock;
-
- nodes_clear(nodes_flushed);
- i = 0;
-
-#ifndef XEN /* One day Xen will grow up! */
- for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
- cnode = cpu_to_node(cpu);
- node_set(cnode, nodes_flushed);
- lcpu = cpu;
- i++;
- }
-#else
- for_each_possible_cpu(cpu) {
- cnode = cpu_to_node(cpu);
- node_set(cnode, nodes_flushed);
- lcpu = cpu;
- i++;
- }
-#endif
-
- if (i == 0)
- return;
-
- preempt_disable();
-
-#ifndef XEN
- if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) {
- do {
- ia64_ptcl(start, nbits << 2);
- start += (1UL << nbits);
- } while (start < end);
- ia64_srlz_i();
- __get_cpu_var(ptcstats).ptc_l++;
- preempt_enable();
- return;
- }
-
- if (atomic_read(&mm->mm_users) == 1 && mymm) {
- flush_tlb_mm(mm);
- __get_cpu_var(ptcstats).change_rid++;
- preempt_enable();
- return;
- }
-#endif
-
- itc = ia64_get_itc();
- nix = 0;
- for_each_node_mask(cnode, nodes_flushed)
- nasids[nix++] = cnodeid_to_nasid(cnode);
-
-#ifndef XEN
- rr_value = (mm->context << 3) | REGION_NUMBER(start);
-#else
- rr_value = REGION_NUMBER(start);
-#endif
-
- shub1 = is_shub1();
- if (shub1) {
- data0 = (1UL << SH1_PTC_0_A_SHFT) |
- (nbits << SH1_PTC_0_PS_SHFT) |
- (rr_value << SH1_PTC_0_RID_SHFT) |
- (1UL << SH1_PTC_0_START_SHFT);
-#ifndef XEN
- ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
- ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
-#else
- ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
- ptc1 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
-#endif
- } else {
- data0 = (1UL << SH2_PTC_A_SHFT) |
- (nbits << SH2_PTC_PS_SHFT) |
- (1UL << SH2_PTC_START_SHFT);
-#ifndef XEN
- ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
-#else
- ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
-#endif
- (rr_value << SH2_PTC_RID_SHFT));
- ptc1 = NULL;
- }
-
-
- mynasid = get_nasid();
- use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
- max_active = max_active_pio(shub1);
-
- itc = ia64_get_itc();
- spin_lock_irqsave(PTC_LOCK(shub1), flags);
- itc2 = ia64_get_itc();
-
- __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
- __get_cpu_var(ptcstats).shub_ptc_flushes++;
- __get_cpu_var(ptcstats).nodes_flushed += nix;
- if (!mymm)
- __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;
-
- if (use_cpu_ptcga && !mymm) {
- old_rr = ia64_get_rr(start);
- ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
- ia64_srlz_d();
- }
-
- wait_piowc();
- do {
- if (shub1)
- data1 = start | (1UL << SH1_PTC_1_START_SHFT);
- else
- data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
- deadlock = 0;
- active = 0;
- for (ibegin = 0, i = 0; i < nix; i++) {
- nasid = nasids[i];
- if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
- ia64_ptcga(start, nbits << 2);
- ia64_srlz_i();
- } else {
- ptc0 = CHANGE_NASID(nasid, ptc0);
- if (ptc1)
- ptc1 = CHANGE_NASID(nasid, ptc1);
- pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
- active++;
- }
- if (active >= max_active || i == (nix - 1)) {
- if ((deadlock = wait_piowc())) {
- sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
- if (reset_max_active_on_deadlock())
- max_active = 1;
- }
- active = 0;
- ibegin = i + 1;
- }
- }
- start += (1UL << nbits);
- } while (start < end);
-
- itc2 = ia64_get_itc() - itc2;
- __get_cpu_var(ptcstats).shub_itc_clocks += itc2;
- if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
- __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
-
- if (old_rr) {
- ia64_set_rr(start, old_rr);
- ia64_srlz_d();
- }
-
- spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
-
- preempt_enable();
-}
-#endif
-
-/*
- * sn2_ptc_deadlock_recovery
- *
- * Recover from PTC deadlock conditions. Recovery requires stepping through each
- * TLB flush transaction. The recovery sequence is somewhat tricky & is
- * coded in assembly language.
- */
-
-void
-sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
- volatile unsigned long *ptc0, unsigned long data0,
- volatile unsigned long *ptc1, unsigned long data1)
-{
- short nasid, i;
- unsigned long *piows, zeroval, n;
-
- __get_cpu_var(ptcstats).deadlocks++;
-
- piows = (unsigned long *) pda->pio_write_status_addr;
- zeroval = pda->pio_write_status_val;
-
-
- for (i=ib; i <= ie; i++) {
- nasid = nasids[i];
- if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
- continue;
- ptc0 = CHANGE_NASID(nasid, ptc0);
- if (ptc1)
- ptc1 = CHANGE_NASID(nasid, ptc1);
-
- n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
- __get_cpu_var(ptcstats).deadlocks2 += n;
- }
-
-}
-
-/**
- * sn_send_IPI_phys - send an IPI to a Nasid and slice
- * @nasid: nasid to receive the interrupt (may be outside partition)
- * @physid: physical cpuid to receive the interrupt.
- * @vector: command to send
- * @delivery_mode: delivery mechanism
- *
- * Sends an IPI (interprocessor interrupt) to the processor specified by
- * @physid
- *
- * @delivery_mode can be one of the following
- *
- * %IA64_IPI_DM_INT - pend an interrupt
- * %IA64_IPI_DM_PMI - pend a PMI
- * %IA64_IPI_DM_NMI - pend an NMI
- * %IA64_IPI_DM_INIT - pend an INIT interrupt
- */
-void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
-{
- long val;
- unsigned long flags = 0;
- volatile long *p;
-
- p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
- val = (1UL << SH_IPI_INT_SEND_SHFT) |
- (physid << SH_IPI_INT_PID_SHFT) |
- ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
- ((long)vector << SH_IPI_INT_IDX_SHFT) |
- (0x000feeUL << SH_IPI_INT_BASE_SHFT);
-
- mb();
- if (enable_shub_wars_1_1()) {
- spin_lock_irqsave(&sn2_global_ptc_lock, flags);
- }
- pio_phys_write_mmr(p, val);
- if (enable_shub_wars_1_1()) {
- wait_piowc();
- spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
- }
-
-}
-
-EXPORT_SYMBOL(sn_send_IPI_phys);
-
-/**
- * sn2_send_IPI - send an IPI to a processor
- * @cpuid: target of the IPI
- * @vector: command to send
- * @delivery_mode: delivery mechanism
- * @redirect: redirect the IPI?
- *
- * Sends an IPI (InterProcessor Interrupt) to the processor specified by
- * @cpuid. @vector specifies the command to send, while @delivery_mode can
- * be one of the following
- *
- * %IA64_IPI_DM_INT - pend an interrupt
- * %IA64_IPI_DM_PMI - pend a PMI
- * %IA64_IPI_DM_NMI - pend an NMI
- * %IA64_IPI_DM_INIT - pend an INIT interrupt
- */
-void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
-{
- long physid;
- int nasid;
-
- physid = cpu_physical_id(cpuid);
-#ifdef XEN
- if (!sn_nodepda) {
- ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
- } else
-#endif
- nasid = cpuid_to_nasid(cpuid);
-
- /* the following is used only when starting cpus at boot time */
- if (unlikely(nasid == -1))
- ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
-
- sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
-}
-
-#ifdef CONFIG_PROC_FS
-
-#define PTC_BASENAME "sgi_sn/ptc_statistics"
-
-static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
-{
- if (*offset < NR_CPUS)
- return offset;
- return NULL;
-}
-
-static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
-{
- (*offset)++;
- if (*offset < NR_CPUS)
- return offset;
- return NULL;
-}
-
-static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
-{
-}
-
-static int sn2_ptc_seq_show(struct seq_file *file, void *data)
-{
- struct ptc_stats *stat;
- int cpu;
-
- cpu = *(loff_t *) data;
-
- if (!cpu) {
- seq_printf(file,
- "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
- seq_printf(file, "# ptctest %d\n", sn2_ptctest);
- }
-
- if (cpu < NR_CPUS && cpu_online(cpu)) {
- stat = &per_cpu(ptcstats, cpu);
- seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
- stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
- stat->deadlocks,
- 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
- stat->shub_ptc_flushes_not_my_mm,
- stat->deadlocks2);
- }
- return 0;
-}
-
-static struct seq_operations sn2_ptc_seq_ops = {
- .start = sn2_ptc_seq_start,
- .next = sn2_ptc_seq_next,
- .stop = sn2_ptc_seq_stop,
- .show = sn2_ptc_seq_show
-};
-
-static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &sn2_ptc_seq_ops);
-}
-
-static struct file_operations proc_sn2_ptc_operations = {
- .open = sn2_ptc_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static struct proc_dir_entry *proc_sn2_ptc;
-
-static int __init sn2_ptc_init(void)
-{
- if (!ia64_platform_is("sn2"))
- return 0;
-
- if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
- printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
- return -EINVAL;
- }
- proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations;
- spin_lock_init(&sn2_global_ptc_lock);
- return 0;
-}
-
-static void __exit sn2_ptc_exit(void)
-{
- remove_proc_entry(PTC_BASENAME, NULL);
-}
-
-module_init(sn2_ptc_init);
-module_exit(sn2_ptc_exit);
-#endif /* CONFIG_PROC_FS */
-
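sn_send_IPI_phys() above builds the SH_IPI_INT MMR value by shifting the send bit, target physical cpu id, delivery mode, vector, and interrupt base into one 64-bit word. The sketch below reproduces only the packing pattern; the DEMO_*_SHFT positions are placeholders, not the real SH_IPI_INT_*_SHFT constants from shub_mmr.h.

    #include <stdio.h>

    /* placeholder field positions, NOT the shub_mmr.h values */
    #define DEMO_SEND_SHFT 63
    #define DEMO_PID_SHFT  32
    #define DEMO_TYPE_SHFT 29
    #define DEMO_IDX_SHFT  21
    #define DEMO_BASE_SHFT  0

    static unsigned long long pack_ipi(long physid, int delivery_mode,
                                       int vector)
    {
        return (1ULL << DEMO_SEND_SHFT) |
               ((unsigned long long)physid << DEMO_PID_SHFT) |
               ((unsigned long long)delivery_mode << DEMO_TYPE_SHFT) |
               ((unsigned long long)vector << DEMO_IDX_SHFT) |
               (0x000feeULL << DEMO_BASE_SHFT);
    }

    int main(void)
    {
        /* e.g. pend vector 0xef as a plain interrupt on physical cpu 3 */
        printf("IPI MMR value: %#llx\n", pack_ipi(3, 0, 0xef));
        return 0;
    }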
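The Xen variant of sn2_global_tlb_purge() above flushes locally, then picks at most one CPU on every other node by setting each node's bit in a nodemask the first time that node is seen. The same selection logic in plain C, with a toy cpu_to_node mapping and an unsigned int standing in for the nodemask; it mirrors how the code chooses IPI targets before calling on_selected_cpus().

    #include <stdio.h>

    #define DEMO_NCPUS 8

    static const int demo_cpu_to_node[DEMO_NCPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

    int main(void)
    {
        int local_cpu = 2;   /* pretend smp_processor_id() returned 2 */
        unsigned int nodes_seen = 1u << demo_cpu_to_node[local_cpu];
        int cpu;

        for (cpu = 0; cpu < DEMO_NCPUS; cpu++) {
            int node = demo_cpu_to_node[cpu];
            if (!(nodes_seen & (1u << node)))
                printf("would IPI cpu %d (first cpu seen on node %d)\n",
                       cpu, node);
            nodes_seen |= 1u << node;   /* one representative per node */
        }
        return 0;
    }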
diff --git a/xen/arch/ia64/linux-xen/time.c b/xen/arch/ia64/linux-xen/time.c
deleted file mode 100644
index c84513ed64..0000000000
--- a/xen/arch/ia64/linux-xen/time.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * linux/arch/ia64/kernel/time.c
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger <davidm@hpl.hp.com>
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- * Copyright (C) 1999-2000 VA Linux Systems
- * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
- */
-#include <linux/config.h>
-
-#include <linux/cpu.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/profile.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/interrupt.h>
-#include <linux/efi.h>
-#include <linux/profile.h>
-#include <linux/timex.h>
-
-#include <asm/machvec.h>
-#include <asm/delay.h>
-#include <asm/hw_irq.h>
-#include <asm/ptrace.h>
-#include <asm/sal.h>
-#include <asm/sections.h>
-#include <asm/system.h>
-#ifdef XEN
-#include <linux/jiffies.h> // not included by xen/sched.h
-#endif
-
-extern unsigned long wall_jiffies;
-
-u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
-#define TIME_KEEPER_ID 0 /* smp_processor_id() of time-keeper */
-
-#ifdef CONFIG_IA64_DEBUG_IRQ
-
-unsigned long last_cli_ip;
-EXPORT_SYMBOL(last_cli_ip);
-
-#endif
-
-#ifndef XEN
-static struct time_interpolator itc_interpolator = {
- .shift = 16,
- .mask = 0xffffffffffffffffLL,
- .source = TIME_SOURCE_CPU
-};
-
-static irqreturn_t
-timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
-{
- unsigned long new_itm;
-
- if (unlikely(cpu_is_offline(smp_processor_id()))) {
- return IRQ_HANDLED;
- }
-
- platform_timer_interrupt(irq, dev_id, regs);
-
- new_itm = local_cpu_data->itm_next;
-
- if (!time_after(ia64_get_itc(), new_itm))
- printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
- ia64_get_itc(), new_itm);
-
- profile_tick(CPU_PROFILING, regs);
-
- while (1) {
- update_process_times(user_mode(regs));
-
- new_itm += local_cpu_data->itm_delta;
-
- if (smp_processor_id() == TIME_KEEPER_ID) {
- /*
- * Here we are in the timer irq handler. We have irqs locally
- * disabled, but we don't know if the timer_bh is running on
-			 * another CPU. We need to avoid an SMP race by acquiring the
- * xtime_lock.
- */
- write_seqlock(&xtime_lock);
- do_timer(regs);
- local_cpu_data->itm_next = new_itm;
- write_sequnlock(&xtime_lock);
- } else
- local_cpu_data->itm_next = new_itm;
-
- if (time_after(new_itm, ia64_get_itc()))
- break;
- }
-
- do {
- /*
- * If we're too close to the next clock tick for
- * comfort, we increase the safety margin by
- * intentionally dropping the next tick(s). We do NOT
- * update itm.next because that would force us to call
- * do_timer() which in turn would let our clock run
- * too fast (with the potentially devastating effect
-		 * of losing monotonicity of time).
- */
- while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
- new_itm += local_cpu_data->itm_delta;
- ia64_set_itm(new_itm);
- /* double check, in case we got hit by a (slow) PMI: */
- } while (time_after_eq(ia64_get_itc(), new_itm));
- return IRQ_HANDLED;
-}
-#endif
-
-/*
- * Encapsulate access to the itm structure for SMP.
- */
-void
-ia64_cpu_local_tick (void)
-{
- int cpu = smp_processor_id();
- unsigned long shift = 0, delta;
-
- /* arrange for the cycle counter to generate a timer interrupt: */
- ia64_set_itv(IA64_TIMER_VECTOR);
-
- delta = local_cpu_data->itm_delta;
- /*
- * Stagger the timer tick for each CPU so they don't occur all at (almost) the
- * same time:
- */
- if (cpu) {
- unsigned long hi = 1UL << ia64_fls(cpu);
- shift = (2*(cpu - hi) + 1) * delta/hi/2;
- }
- local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
- ia64_set_itm(local_cpu_data->itm_next);
-}
-
-static int nojitter;
-
-static int __init nojitter_setup(char *str)
-{
- nojitter = 1;
- printk("Jitter checking for ITC timers disabled\n");
- return 1;
-}
-
-__setup("nojitter", nojitter_setup);
-
-
-void __devinit
-ia64_init_itm (void)
-{
- unsigned long platform_base_freq, itc_freq;
- struct pal_freq_ratio itc_ratio, proc_ratio;
-#ifdef XEN /* warning cleanup */
- unsigned long status, platform_base_drift, itc_drift;
-#else
- long status, platform_base_drift, itc_drift;
-#endif
-
- /*
- * According to SAL v2.6, we need to use a SAL call to determine the platform base
- * frequency and then a PAL call to determine the frequency ratio between the ITC
- * and the base frequency.
- */
- status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
- &platform_base_freq, &platform_base_drift);
- if (status != 0) {
- printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
- } else {
- status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
- if (status != 0)
- printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
- }
- if (status != 0) {
- /* invent "random" values */
- printk(KERN_ERR
- "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
- platform_base_freq = 100000000;
- platform_base_drift = -1; /* no drift info */
- itc_ratio.num = 3;
- itc_ratio.den = 1;
- }
- if (platform_base_freq < 40000000) {
- printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
- platform_base_freq);
- platform_base_freq = 75000000;
- platform_base_drift = -1;
- }
- if (!proc_ratio.den)
- proc_ratio.den = 1; /* avoid division by zero */
- if (!itc_ratio.den)
- itc_ratio.den = 1; /* avoid division by zero */
-
- itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
-
- local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
- printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
- "ITC freq=%lu.%03luMHz", smp_processor_id(),
- platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
-#ifdef XEN
- (u64)itc_ratio.num, (u64)itc_ratio.den,
- itc_freq / 1000000, (itc_freq / 1000) % 1000);
-#else
- itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
-#endif
-
- if (platform_base_drift != -1) {
- itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
- printk("+/-%ldppm\n", itc_drift);
- } else {
- itc_drift = -1;
- printk("\n");
- }
-
- local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
- local_cpu_data->itc_freq = itc_freq;
- local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
- local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
- + itc_freq/2)/itc_freq;
-
- if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
-#ifndef XEN
- itc_interpolator.frequency = local_cpu_data->itc_freq;
- itc_interpolator.drift = itc_drift;
-#ifdef CONFIG_SMP
- /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
- * Jitter compensation requires a cmpxchg which may limit
- * the scalability of the syscalls for retrieving time.
- * The ITC synchronization is usually successful to within a few
- * ITC ticks but this is not a sure thing. If you need to improve
- * timer performance in SMP situations then boot the kernel with the
- * "nojitter" option. However, doing so may result in time fluctuating (maybe
- * even going backward) if the ITC offsets between the individual CPUs
- * are too large.
- */
- if (!nojitter) itc_interpolator.jitter = 1;
-#endif
- register_time_interpolator(&itc_interpolator);
-#endif
- }
-
- /* Setup the CPU local timer tick */
- ia64_cpu_local_tick();
-}
-
-#ifndef XEN
-static struct irqaction timer_irqaction = {
- .handler = timer_interrupt,
- .flags = SA_INTERRUPT,
- .name = "timer"
-};
-
-void __init
-time_init (void)
-{
- register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
- efi_gettimeofday(&xtime);
- ia64_init_itm();
-
- /*
- * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
- * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
- */
- set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
-}
-#endif
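A worked example of ia64_init_itm()'s frequency arithmetic above, using the fallback values the code invents when SAL/PAL fail (100 MHz platform base, ITC ratio 3/1) and an assumed HZ of 1024 (HZ is configuration-dependent):

    #include <stdio.h>

    #define DEMO_HZ 1024UL   /* assumption; HZ is a kernel config value */

    int main(void)
    {
        unsigned long base = 100000000UL;          /* fallback base freq, Hz */
        unsigned long num = 3, den = 1;            /* fallback ITC ratio */
        unsigned long itc_freq = base * num / den; /* 300000000 Hz */
        /* round to the nearest tick length, as (itc_freq + HZ/2) / HZ */
        unsigned long itm_delta = (itc_freq + DEMO_HZ / 2) / DEMO_HZ;

        printf("itc_freq=%lu Hz, itm_delta=%lu cycles/tick\n",
               itc_freq, itm_delta);               /* 292969 cycles/tick */
        return 0;
    }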
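ia64_cpu_local_tick() above staggers each CPU's first tick with shift = (2*(cpu - hi) + 1) * delta/hi/2, where hi is the largest power of two not exceeding cpu, so successive CPUs land at 1/2, 1/4, 3/4, 1/8, 3/8, ... of a tick period. A sketch of the formula, where demo_fls() stands in for ia64_fls() (index of the most significant set bit) and delta is an arbitrary tick length:

    #include <stdio.h>

    /* index of the most significant set bit, for x > 0 */
    static unsigned long demo_fls(unsigned long x)
    {
        unsigned long r = 0;
        while (x >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned long delta = 1000, cpu;   /* illustrative tick length */

        for (cpu = 1; cpu < 8; cpu++) {
            unsigned long hi = 1UL << demo_fls(cpu);
            unsigned long shift = (2*(cpu - hi) + 1) * delta/hi/2;
            printf("cpu %lu: shift %lu\n", cpu, shift);
        }
        return 0;
    }

The printed shifts are 500, 250, 750, 125, 375, 625, 875: a binary subdivision of the tick interval.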
diff --git a/xen/arch/ia64/linux-xen/tlb.c b/xen/arch/ia64/linux-xen/tlb.c
deleted file mode 100644
index 2a6bffffb3..0000000000
--- a/xen/arch/ia64/linux-xen/tlb.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * TLB support routines.
- *
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
- * Modified RID allocation for SMP
- * Goutham Rao <goutham.rao@intel.com>
- * IPI based ptc implementation and A-step IPI implementation.
- */
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/delay.h>
-#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
-#include <asm/pal.h>
-#include <asm/tlbflush.h>
-
-static struct {
- unsigned long mask; /* mask of supported purge page-sizes */
- unsigned long max_bits; /* log2() of largest supported purge page-size */
-} purge;
-
-#ifndef XEN
-struct ia64_ctx ia64_ctx = {
- .lock = SPIN_LOCK_UNLOCKED,
- .next = 1,
- .limit = (1 << 15) - 1, /* start out with the safe (architected) limit */
- .max_ctx = ~0U
-};
-
-DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
-
-/*
- * Acquire the ia64_ctx.lock before calling this function!
- */
-void
-wrap_mmu_context (struct mm_struct *mm)
-{
- unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
- struct task_struct *tsk;
- int i;
-
- if (ia64_ctx.next > max_ctx)
- ia64_ctx.next = 300; /* skip daemons */
- ia64_ctx.limit = max_ctx + 1;
-
- /*
-	 * Scan every task's mm->context and set a proper safe range
- */
-
- read_lock(&tasklist_lock);
- repeat:
- for_each_process(tsk) {
- if (!tsk->mm)
- continue;
- tsk_context = tsk->mm->context;
- if (tsk_context == ia64_ctx.next) {
- if (++ia64_ctx.next >= ia64_ctx.limit) {
- /* empty range: reset the range limit and start over */
- if (ia64_ctx.next > max_ctx)
- ia64_ctx.next = 300;
- ia64_ctx.limit = max_ctx + 1;
- goto repeat;
- }
- }
- if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
- ia64_ctx.limit = tsk_context;
- }
- read_unlock(&tasklist_lock);
- /* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
- {
- int cpu = get_cpu(); /* prevent preemption/migration */
- for (i = 0; i < NR_CPUS; ++i)
- if (cpu_online(i) && (i != cpu))
- per_cpu(ia64_need_tlb_flush, i) = 1;
- put_cpu();
- }
- local_flush_tlb_all();
-}
-#endif /* XEN */
-
-void
-ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
-{
- static DEFINE_SPINLOCK(ptcg_lock);
-
- /* HW requires global serialization of ptc.ga. */
- spin_lock(&ptcg_lock);
- {
- do {
- /*
- * Flush ALAT entries also.
- */
- ia64_ptcga(start, (nbits<<2));
- ia64_srlz_i();
- start += (1UL << nbits);
- } while (start < end);
- }
- spin_unlock(&ptcg_lock);
-}
-
-void
-local_flush_tlb_all (void)
-{
- unsigned long i, j, flags, count0, count1, stride0, stride1, addr;
-#ifdef XEN
- /* increment flush clock before mTLB flush */
- u32 flush_time = tlbflush_clock_inc_and_return();
-#endif
- addr = local_cpu_data->ptce_base;
- count0 = local_cpu_data->ptce_count[0];
- count1 = local_cpu_data->ptce_count[1];
- stride0 = local_cpu_data->ptce_stride[0];
- stride1 = local_cpu_data->ptce_stride[1];
-
- local_irq_save(flags);
- for (i = 0; i < count0; ++i) {
- for (j = 0; j < count1; ++j) {
- ia64_ptce(addr);
- addr += stride1;
- }
- addr += stride0;
- }
- local_irq_restore(flags);
- ia64_srlz_i(); /* srlz.i implies srlz.d */
-#ifdef XEN
- /* update after mTLB flush. */
- tlbflush_update_time(&__get_cpu_var(tlbflush_time), flush_time);
-#endif
-}
-EXPORT_SYMBOL(local_flush_tlb_all);
-
-#ifndef XEN
-void
-flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long size = end - start;
- unsigned long nbits;
-
- if (mm != current->active_mm) {
- /* this does happen, but perhaps it's not worth optimizing for? */
-#ifdef CONFIG_SMP
- flush_tlb_all();
-#else
- mm->context = 0;
-#endif
- return;
- }
-
- nbits = ia64_fls(size + 0xfff);
- while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
- ++nbits;
- if (nbits > purge.max_bits)
- nbits = purge.max_bits;
- start &= ~((1UL << nbits) - 1);
-
-# ifdef CONFIG_SMP
- platform_global_tlb_purge(start, end, nbits);
-# else
- do {
- ia64_ptcl(start, (nbits<<2));
- start += (1UL << nbits);
- } while (start < end);
-# endif
-
- ia64_srlz_i(); /* srlz.i implies srlz.d */
-}
-EXPORT_SYMBOL(flush_tlb_range);
-#endif
-
-void __devinit
-ia64_tlb_init (void)
-{
-#ifndef XEN
- ia64_ptce_info_t ptce_info;
-#else
- ia64_ptce_info_t ptce_info = { 0 };
-#endif
- unsigned long tr_pgbits;
- long status;
-
- if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
- printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld;"
- "defaulting to architected purge page-sizes.\n", status);
- purge.mask = 0x115557000UL;
- }
- purge.max_bits = ia64_fls(purge.mask);
-
- ia64_get_ptce(&ptce_info);
- local_cpu_data->ptce_base = ptce_info.base;
- local_cpu_data->ptce_count[0] = ptce_info.count[0];
- local_cpu_data->ptce_count[1] = ptce_info.count[1];
- local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
- local_cpu_data->ptce_stride[1] = ptce_info.stride[1];
-
-	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
-}
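local_flush_tlb_all() above is a two-dimensional strided walk over the purge domain PAL describes through ia64_get_ptce(): count0 outer passes, each issuing count1 ptc.e operations stride1 apart, then advancing by stride0. A host-side skeleton of the loop shape, with made-up counts and strides and printf() standing in for ia64_ptce():

    #include <stdio.h>

    int main(void)
    {
        unsigned long addr = 0;                   /* ptce_info.base */
        unsigned long count0 = 2, count1 = 3;     /* made-up counts */
        unsigned long stride0 = 0x1000, stride1 = 0x100;
        unsigned long i, j;

        for (i = 0; i < count0; ++i) {
            for (j = 0; j < count1; ++j) {
                printf("ptc.e %#lx\n", addr);     /* ia64_ptce(addr) */
                addr += stride1;
            }
            addr += stride0;
        }
        return 0;
    }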
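flush_tlb_range() above also shows how the purge page size is chosen: start from fls(size + 0xfff) and raise nbits until bit nbits is set in purge.mask, capping at purge.max_bits, so the purge always steps by a size the hardware supports. A sketch with an invented supported-size mask (the real mask comes from PAL_VM_PAGE_SIZE):

    #include <stdio.h>

    /* index of the most significant set bit, for x > 0 */
    static unsigned long demo_fls(unsigned long x)
    {
        unsigned long r = 0;
        while (x >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        /* invented mask: pretend 4K, 16K and 64K purges are supported */
        unsigned long mask = (1UL << 12) | (1UL << 14) | (1UL << 16);
        unsigned long max_bits = demo_fls(mask);   /* 16 */
        unsigned long size = 0x6000;               /* 24 KiB range */
        unsigned long nbits = demo_fls(size + 0xfff);

        while (!((1UL << nbits) & mask) && nbits < max_bits)
            ++nbits;
        if (nbits > max_bits)
            nbits = max_bits;

        printf("purge in %#lx-byte steps\n", 1UL << nbits);  /* 0x4000 */
        return 0;
    }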
diff --git a/xen/arch/ia64/linux-xen/unaligned.c b/xen/arch/ia64/linux-xen/unaligned.c
deleted file mode 100644
index 3b4d754395..0000000000
--- a/xen/arch/ia64/linux-xen/unaligned.c
+++ /dev/null
@@ -1,1985 +0,0 @@
-/*
- * Architecture-specific unaligned trap handling.
- *
- * Copyright (C) 1999-2002, 2004 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 2002/12/09 Fix rotating register handling (off-by-1 error, missing fr-rotation). Fix
- * get_rse_reg() to not leak kernel bits to user-level (reading an out-of-frame
- * stacked register returns an undefined value; it does NOT trigger a
- * "rsvd register fault").
- * 2001/10/11 Fix unaligned access to rotating registers in s/w pipelined loops.
- * 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes.
- * 2001/01/17 Add support emulation of unaligned kernel accesses.
- */
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp_lock.h>
-#include <linux/tty.h>
-
-#include <asm/intrinsics.h>
-#include <asm/processor.h>
-#include <asm/rse.h>
-#include <asm/uaccess.h>
-#include <asm/unaligned.h>
-
-extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
-
-#undef DEBUG_UNALIGNED_TRAP
-
-#ifdef DEBUG_UNALIGNED_TRAP
-# define DPRINT(a...) do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0)
-# define DDUMP(str,vp,len) dump(str, vp, len)
-
-static void
-dump (const char *str, void *vp, size_t len)
-{
- unsigned char *cp = vp;
- int i;
-
- printk("%s", str);
- for (i = 0; i < len; ++i)
- printk (" %02x", *cp++);
- printk("\n");
-}
-#else
-# define DPRINT(a...)
-# define DDUMP(str,vp,len)
-#endif
-
-#define IA64_FIRST_STACKED_GR 32
-#define IA64_FIRST_ROTATING_FR 32
-#define SIGN_EXT9 0xffffffffffffff00ul
-
-/*
- * For M-unit:
- *
- * opcode | m | x6 |
- * --------|------|---------|
- * [40-37] | [36] | [35:30] |
- * --------|------|---------|
- * 4 | 1 | 6 | = 11 bits
- * --------------------------
- * However bits [31:30] are not directly useful to distinguish between
- * load/store so we can use [35:32] instead, which gives the following
- * mask ([40:32]) using 9 bits. The 'e' comes from the fact that we defer
- * checking the m-bit until later in the load/store emulation.
- */
-#define IA64_OPCODE_MASK 0x1ef
-#define IA64_OPCODE_SHIFT 32
-
-/*
- * Table C-28 Integer Load/Store
- *
- * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
- *
- * ld8.fill, st8.fill MUST be aligned because the RNATs are based on
- * the address (bits [8:3]), so we must fail.
- */
-#define LD_OP 0x080
-#define LDS_OP 0x081
-#define LDA_OP 0x082
-#define LDSA_OP 0x083
-#define LDBIAS_OP 0x084
-#define LDACQ_OP 0x085
-/* 0x086, 0x087 are not relevant */
-#define LDCCLR_OP 0x088
-#define LDCNC_OP 0x089
-#define LDCCLRACQ_OP 0x08a
-#define ST_OP 0x08c
-#define STREL_OP 0x08d
-/* 0x08e,0x8f are not relevant */
-
-/*
- * Table C-29 Integer Load +Reg
- *
- * we use the ld->m (bit [36:36]) field to determine whether or not we have
- * a load/store of this form.
- */
-
-/*
- * Table C-30 Integer Load/Store +Imm
- *
- * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
- *
- * ld8.fill, st8.fill must be aligned because the NaT registers are based on
- * the address, so we must fail and the program must be fixed.
- */
-#define LD_IMM_OP 0x0a0
-#define LDS_IMM_OP 0x0a1
-#define LDA_IMM_OP 0x0a2
-#define LDSA_IMM_OP 0x0a3
-#define LDBIAS_IMM_OP 0x0a4
-#define LDACQ_IMM_OP 0x0a5
-/* 0x0a6, 0xa7 are not relevant */
-#define LDCCLR_IMM_OP 0x0a8
-#define LDCNC_IMM_OP 0x0a9
-#define LDCCLRACQ_IMM_OP 0x0aa
-#define ST_IMM_OP 0x0ac
-#define STREL_IMM_OP 0x0ad
-/* 0x0ae,0xaf are not relevant */
-
-/*
- * Table C-32 Floating-point Load/Store
- */
-#define LDF_OP 0x0c0
-#define LDFS_OP 0x0c1
-#define LDFA_OP 0x0c2
-#define LDFSA_OP 0x0c3
-/* 0x0c6 is irrelevant */
-#define LDFCCLR_OP 0x0c8
-#define LDFCNC_OP 0x0c9
-/* 0x0cb is irrelevant */
-#define STF_OP 0x0cc
-
-/*
- * Table C-33 Floating-point Load +Reg
- *
- * we use the ld->m (bit [36:36]) field to determine whether or not we have
- * a load/store of this form.
- */
-
-/*
- * Table C-34 Floating-point Load/Store +Imm
- */
-#define LDF_IMM_OP 0x0e0
-#define LDFS_IMM_OP 0x0e1
-#define LDFA_IMM_OP 0x0e2
-#define LDFSA_IMM_OP 0x0e3
-/* 0x0e6 is irrelevant */
-#define LDFCCLR_IMM_OP 0x0e8
-#define LDFCNC_IMM_OP 0x0e9
-#define STF_IMM_OP 0x0ec
-
-typedef struct {
- unsigned long qp:6; /* [0:5] */
- unsigned long r1:7; /* [6:12] */
- unsigned long imm:7; /* [13:19] */
- unsigned long r3:7; /* [20:26] */
- unsigned long x:1; /* [27:27] */
- unsigned long hint:2; /* [28:29] */
- unsigned long x6_sz:2; /* [30:31] */
- unsigned long x6_op:4; /* [32:35], x6 = x6_sz|x6_op */
- unsigned long m:1; /* [36:36] */
- unsigned long op:4; /* [37:40] */
- unsigned long pad:23; /* [41:63] */
-} load_store_t;
-
-
-typedef enum {
- UPD_IMMEDIATE, /* ldXZ r1=[r3],imm(9) */
- UPD_REG /* ldXZ r1=[r3],r2 */
-} update_t;
-
-/*
- * We use tables to keep track of the offsets of registers in the saved state.
- * This way we save having big switch/case statements.
- *
- * We use bit 0 to indicate switch_stack or pt_regs.
- * The offset is simply shifted by 1 bit.
- * A 2-byte value should be enough to hold any kind of offset
- *
- * In case the calling convention changes (and thus pt_regs/switch_stack)
- * simply use RSW instead of RPT or vice-versa.
- */
-
-#define RPO(x) ((size_t) &((struct pt_regs *)0)->x)
-#define RSO(x) ((size_t) &((struct switch_stack *)0)->x)
-
-#define RPT(x) (RPO(x) << 1)
-#define RSW(x) (1| RSO(x)<<1)
-
-#define GR_OFFS(x) (gr_info[x]>>1)
-#define GR_IN_SW(x) (gr_info[x] & 0x1)
-
-#define FR_OFFS(x) (fr_info[x]>>1)
-#define FR_IN_SW(x) (fr_info[x] & 0x1)
-
-static u16 gr_info[32]={
- 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
-
- RPT(r1), RPT(r2), RPT(r3),
-
-#if defined(XEN)
- RPT(r4), RPT(r5), RPT(r6), RPT(r7),
-#else
- RSW(r4), RSW(r5), RSW(r6), RSW(r7),
-#endif
-
- RPT(r8), RPT(r9), RPT(r10), RPT(r11),
- RPT(r12), RPT(r13), RPT(r14), RPT(r15),
-
- RPT(r16), RPT(r17), RPT(r18), RPT(r19),
- RPT(r20), RPT(r21), RPT(r22), RPT(r23),
- RPT(r24), RPT(r25), RPT(r26), RPT(r27),
- RPT(r28), RPT(r29), RPT(r30), RPT(r31)
-};
-
-#ifndef XEN
-static u16 fr_info[32]={
- 0, /* constant : WE SHOULD NEVER GET THIS */
- 0, /* constant : WE SHOULD NEVER GET THIS */
-
- RSW(f2), RSW(f3), RSW(f4), RSW(f5),
-
- RPT(f6), RPT(f7), RPT(f8), RPT(f9),
- RPT(f10), RPT(f11),
-
- RSW(f12), RSW(f13), RSW(f14),
- RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
- RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
- RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
- RSW(f30), RSW(f31)
-};
-
-/* Invalidate ALAT entry for integer register REGNO. */
-static void
-invala_gr (int regno)
-{
-# define F(reg) case reg: ia64_invala_gr(reg); break
-
- switch (regno) {
- F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7);
- F( 8); F( 9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
- F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
- F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
- F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
- F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
- F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
- F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
- F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
- F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
- F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
- F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
- F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
- F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
- F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
- F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
- }
-# undef F
-}
-
-/* Invalidate ALAT entry for floating-point register REGNO. */
-static void
-invala_fr (int regno)
-{
-# define F(reg) case reg: ia64_invala_fr(reg); break
-
- switch (regno) {
- F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7);
- F( 8); F( 9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
- F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
- F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
- F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
- F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
- F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
- F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
- F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
- F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
- F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
- F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
- F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
- F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
- F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
- F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
- }
-# undef F
-}
-#endif /* XEN */
-
-static inline unsigned long
-rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
-{
- reg += rrb;
- if (reg >= sor)
- reg -= sor;
- return reg;
-}
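-/*
- * Illustrative note (added; not in the original source): rotate_reg()
- * computes (reg + rrb) % sor without a divide, relying on reg < sor and
- * rrb < sor for a well-formed rotating frame. For example, with sor=16
- * and rrb=3, reg=14 maps to (14+3)-16 = 1, while reg=2 maps to 5.
- */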
-
-#if defined(XEN)
-void
-set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
-{
- unsigned long *bsp, *bspstore, *addr, *rnat_addr;
- unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
- unsigned long nat_mask;
- unsigned long old_rsc, new_rsc, psr;
- unsigned long rnat;
- long sof = (regs->cr_ifs) & 0x7f;
- long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
- long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
- long ridx = r1 - 32;
-
- if (ridx >= sof) {
- /* this should never happen, as the "rsvd register fault" has higher priority */
- DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
- return;
- }
-
- if (ridx < sor)
- ridx = rotate_reg(sor, rrb_gr, ridx);
-
- old_rsc=ia64_get_rsc();
- /* put RSC to lazy mode, and set loadrs 0 */
- new_rsc = old_rsc & (~0x3fff0003);
- ia64_set_rsc(new_rsc);
-	bsp = kbs + (regs->loadrs >> 19); /* loadrs (bits 29:16) = dirty bytes: >>16, then >>3 for 8-byte slots */
-
- addr = ia64_rse_skip_regs(bsp, -sof + ridx);
- nat_mask = 1UL << ia64_rse_slot_num(addr);
- rnat_addr = ia64_rse_rnat_addr(addr);
-
- local_irq_save(psr);
- bspstore = (unsigned long*)ia64_get_bspstore();
- if(addr >= bspstore){
-
- ia64_flushrs ();
- ia64_mf ();
- *addr = val;
- bspstore = (unsigned long*)ia64_get_bspstore();
- rnat = ia64_get_rnat ();
- if(bspstore < rnat_addr){
- rnat=rnat&(~nat_mask);
- }else{
- *rnat_addr = (*rnat_addr)&(~nat_mask);
- }
- ia64_mf();
- ia64_loadrs();
- ia64_set_rnat(rnat);
- }else{
-
- rnat = ia64_get_rnat ();
- *addr = val;
- if(bspstore < rnat_addr){
- rnat=rnat&(~nat_mask);
- }else{
- *rnat_addr = (*rnat_addr)&(~nat_mask);
- }
- ia64_set_bspstore (bspstore);
- ia64_set_rnat(rnat);
- }
- local_irq_restore(psr);
- ia64_set_rsc(old_rsc);
-}
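-/*
- * Sketch of the layout this relies on (added note, not original code):
- *
- *   kbs .................. ar.bspstore .................. bsp
- *   |--- slots already spilled ---|--- dirty slots still held ---|
- *   |    to the backing store     |    in stacked registers      |
- *
- * A target address below ar.bspstore can be patched directly in memory;
- * otherwise ia64_flushrs() first forces the dirty partition out, which
- * is exactly the addr >= bspstore split taken above.
- */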
-
-
-static void
-get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat)
-{
- unsigned long *bsp, *addr, *rnat_addr, *bspstore;
- unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
- unsigned long nat_mask;
- unsigned long old_rsc, new_rsc;
- long sof = (regs->cr_ifs) & 0x7f;
- long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
- long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
- long ridx = r1 - 32;
-
- if (ridx >= sof) {
-		/* architecturally the result is undefined, but the "rsvd register
-		   fault" should fire first, so treat reaching here as a bug */
- DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
- panic("wrong stack register number (iip=%lx)\n", regs->cr_iip);
- }
-
- if (ridx < sor)
- ridx = rotate_reg(sor, rrb_gr, ridx);
-
- old_rsc=ia64_get_rsc();
- new_rsc=old_rsc&(~(0x3));
- ia64_set_rsc(new_rsc);
-
- bspstore = (unsigned long*)ia64_get_bspstore();
-	bsp = kbs + (regs->loadrs >> 19); /* loadrs (bits 29:16) = dirty bytes: >>16, then >>3 for 8-byte slots */
-
- addr = ia64_rse_skip_regs(bsp, -sof + ridx);
- nat_mask = 1UL << ia64_rse_slot_num(addr);
- rnat_addr = ia64_rse_rnat_addr(addr);
-
- if(addr >= bspstore){
-
- ia64_flushrs ();
- ia64_mf ();
- bspstore = (unsigned long*)ia64_get_bspstore();
- }
-	*val = *addr;
-	if (nat) {
-		if (bspstore < rnat_addr)
-			*nat = (int)!!(ia64_get_rnat() & nat_mask);
-		else
-			*nat = (int)!!((*rnat_addr) & nat_mask);
-	}
-	/* restore RSC even when the caller did not ask for the NaT bit */
-	ia64_set_rsc(old_rsc);
-}
-
-#else
-static void
-set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
-{
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
- unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
- unsigned long rnats, nat_mask;
- unsigned long on_kbs;
- long sof = (regs->cr_ifs) & 0x7f;
- long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
- long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
- long ridx = r1 - 32;
-
- if (ridx >= sof) {
- /* this should never happen, as the "rsvd register fault" has higher priority */
- DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
- return;
- }
-
- if (ridx < sor)
- ridx = rotate_reg(sor, rrb_gr, ridx);
-
- DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
- r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
-
- on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
- addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
- if (addr >= kbs) {
- /* the register is on the kernel backing store: easy... */
- rnat_addr = ia64_rse_rnat_addr(addr);
- if ((unsigned long) rnat_addr >= sw->ar_bspstore)
- rnat_addr = &sw->ar_rnat;
- nat_mask = 1UL << ia64_rse_slot_num(addr);
-
- *addr = val;
- if (nat)
- *rnat_addr |= nat_mask;
- else
- *rnat_addr &= ~nat_mask;
- return;
- }
-
- if (!user_stack(current, regs)) {
- DPRINT("ignoring kernel write to r%lu; register isn't on the kernel RBS!", r1);
- return;
- }
-
- bspstore = (unsigned long *)regs->ar_bspstore;
- ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
- bsp = ia64_rse_skip_regs(ubs_end, -sof);
- addr = ia64_rse_skip_regs(bsp, ridx);
-
- DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
-
- ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
-
- rnat_addr = ia64_rse_rnat_addr(addr);
-
- ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
- DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n",
- (void *) rnat_addr, rnats, nat, (rnats >> ia64_rse_slot_num(addr)) & 1);
-
- nat_mask = 1UL << ia64_rse_slot_num(addr);
- if (nat)
- rnats |= nat_mask;
- else
- rnats &= ~nat_mask;
- ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);
-
- DPRINT("rnat changed to @%p = 0x%lx\n", (void *) rnat_addr, rnats);
-}
-
-
-static void
-get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat)
-{
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
- unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
- unsigned long rnats, nat_mask;
- unsigned long on_kbs;
- long sof = (regs->cr_ifs) & 0x7f;
- long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
- long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
- long ridx = r1 - 32;
-
- if (ridx >= sof) {
- /* read of out-of-frame register returns an undefined value; 0 in our case. */
- DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
- goto fail;
- }
-
- if (ridx < sor)
- ridx = rotate_reg(sor, rrb_gr, ridx);
-
- DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
- r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
-
- on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
- addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
- if (addr >= kbs) {
- /* the register is on the kernel backing store: easy... */
- *val = *addr;
- if (nat) {
- rnat_addr = ia64_rse_rnat_addr(addr);
- if ((unsigned long) rnat_addr >= sw->ar_bspstore)
- rnat_addr = &sw->ar_rnat;
- nat_mask = 1UL << ia64_rse_slot_num(addr);
- *nat = (*rnat_addr & nat_mask) != 0;
- }
- return;
- }
-
- if (!user_stack(current, regs)) {
- DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1);
- goto fail;
- }
-
- bspstore = (unsigned long *)regs->ar_bspstore;
- ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
- bsp = ia64_rse_skip_regs(ubs_end, -sof);
- addr = ia64_rse_skip_regs(bsp, ridx);
-
- DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
-
- ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
-
- if (nat) {
- rnat_addr = ia64_rse_rnat_addr(addr);
- nat_mask = 1UL << ia64_rse_slot_num(addr);
-
- DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);
-
- ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
- *nat = (rnats & nat_mask) != 0;
- }
- return;
-
- fail:
- *val = 0;
- if (nat)
- *nat = 0;
- return;
-}
-#endif
-
-
-#ifdef XEN
-void
-#else
-static void
-#endif
-setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
-{
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- unsigned long addr;
- unsigned long bitmask;
- unsigned long *unat;
-
- /*
- * First takes care of stacked registers
- */
- if (regnum >= IA64_FIRST_STACKED_GR) {
- set_rse_reg(regs, regnum, val, nat);
- return;
- }
-
- /*
- * Using r0 as a target raises a General Exception fault which has higher priority
- * than the Unaligned Reference fault.
- */
-
- /*
- * Now look at registers in [0-31] range and init correct UNAT
- */
- if (GR_IN_SW(regnum)) {
- addr = (unsigned long)sw;
- unat = &sw->ar_unat;
- } else {
- addr = (unsigned long)regs;
-#if defined(XEN)
- unat = &regs->eml_unat;
-#else
- unat = &sw->caller_unat;
-#endif
- }
- DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
- addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
- /*
- * add offset from base of struct
- * and do it !
- */
- addr += GR_OFFS(regnum);
-
- *(unsigned long *)addr = val;
-
- /*
- * We need to clear the corresponding UNAT bit to fully emulate the load
- * UNAT bit_pos = GR[r3]{8:3} form EAS-2.4
- */
- bitmask = 1UL << (addr >> 3 & 0x3f);
- DPRINT("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, (void *) unat, *unat);
- if (nat) {
- *unat |= bitmask;
- } else {
- *unat &= ~bitmask;
- }
- DPRINT("*0x%lx=0x%lx NaT=%d new unat: %p=%lx\n", addr, val, nat, (void *) unat,*unat);
-}
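-/*
- * Worked example (added): the UNAT bit for a spilled register is taken
- * from bits 8:3 of its save address. If the slot for a register ends up
- * at an address ending in 0x128, then (0x128 >> 3) & 0x3f = 37, so bit
- * 37 of the chosen UNAT word tracks its NaT state; that is what the
- * bitmask computation in setreg() encodes.
- */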
-
-/*
- * Return the (rotated) index for floating point register REGNUM (REGNUM must be in the
- * range from 32-127, result is in the range from 0-95.
- */
-static inline unsigned long
-fph_index (struct pt_regs *regs, long regnum)
-{
- unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
- return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
-}
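-/*
- * Worked example (added): floating-point rotation always covers
- * f32-f127, hence the fixed sor of 96. With rrb.fr == 10, a reference
- * to f40 gives fph_index = rotate_reg(96, 10, 40 - 32) = 18, i.e. the
- * value physically lives in f50.
- */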
-
-#ifndef XEN
-static void
-setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
-{
- struct switch_stack *sw = (struct switch_stack *)regs - 1;
- unsigned long addr;
-
- /*
- * From EAS-2.5: FPDisableFault has higher priority than Unaligned
- * Fault. Thus, when we get here, we know the partition is enabled.
- * To update f32-f127, there are three choices:
- *
- * (1) save f32-f127 to thread.fph and update the values there
- * (2) use a gigantic switch statement to directly access the registers
- * (3) generate code on the fly to update the desired register
- *
- * For now, we are using approach (1).
- */
- if (regnum >= IA64_FIRST_ROTATING_FR) {
- ia64_sync_fph(current);
-#ifdef XEN
- current->arch._thread.fph[fph_index(regs, regnum)] = *fpval;
-#else
- current->thread.fph[fph_index(regs, regnum)] = *fpval;
-#endif
- } else {
- /*
- * pt_regs or switch_stack ?
- */
- if (FR_IN_SW(regnum)) {
- addr = (unsigned long)sw;
- } else {
- addr = (unsigned long)regs;
- }
-
- DPRINT("tmp_base=%lx offset=%d\n", addr, FR_OFFS(regnum));
-
- addr += FR_OFFS(regnum);
- *(struct ia64_fpreg *)addr = *fpval;
-
- /*
- * mark the low partition as being used now
- *
- * It is highly unlikely that this bit is not already set, but
- * let's do it for safety.
- */
- regs->cr_ipsr |= IA64_PSR_MFL;
- }
-}
-#endif /* XEN */
-
-/*
- * Those 2 inline functions generate the spilled versions of the constant floating point
- * registers which can be used with stfX
- */
-static inline void
-float_spill_f0 (struct ia64_fpreg *final)
-{
- ia64_stf_spill(final, 0);
-}
-
-static inline void
-float_spill_f1 (struct ia64_fpreg *final)
-{
- ia64_stf_spill(final, 1);
-}
-
-#ifndef XEN
-static void
-getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
-{
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- unsigned long addr;
-
- /*
- * From EAS-2.5: FPDisableFault has higher priority than
- * Unaligned Fault. Thus, when we get here, we know the partition is
- * enabled.
- *
- * When regnum > 31, the register is still live and we need to force a save
- * to current->thread.fph to get access to it. See discussion in setfpreg()
- * for reasons and other ways of doing this.
- */
- if (regnum >= IA64_FIRST_ROTATING_FR) {
- ia64_flush_fph(current);
-#ifdef XEN
- *fpval = current->arch._thread.fph[fph_index(regs, regnum)];
-#else
- *fpval = current->thread.fph[fph_index(regs, regnum)];
-#endif
- } else {
- /*
- * f0 = 0.0, f1= 1.0. Those registers are constant and are thus
- * not saved, we must generate their spilled form on the fly
- */
- switch(regnum) {
- case 0:
- float_spill_f0(fpval);
- break;
- case 1:
- float_spill_f1(fpval);
- break;
- default:
- /*
- * pt_regs or switch_stack ?
- */
- addr = FR_IN_SW(regnum) ? (unsigned long)sw
- : (unsigned long)regs;
-
- DPRINT("is_sw=%d tmp_base=%lx offset=0x%x\n",
- FR_IN_SW(regnum), addr, FR_OFFS(regnum));
-
- addr += FR_OFFS(regnum);
- *fpval = *(struct ia64_fpreg *)addr;
- }
- }
-}
-#else
-void
-getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
-{
- // Take floating register rotation into consideration
- if(regnum >= IA64_FIRST_ROTATING_FR)
- regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
-#define CASE_FIXED_FP(reg) \
- case reg: \
- ia64_stf_spill(fpval,reg); \
- break
-#define CASE_SAVED_FP(reg) \
- case reg: \
- fpval->u.bits[0] = regs->f##reg.u.bits[0]; \
- fpval->u.bits[1] = regs->f##reg.u.bits[1]; \
- break
- switch(regnum) {
- CASE_FIXED_FP(0);
- CASE_FIXED_FP(1);
- CASE_FIXED_FP(2);
- CASE_FIXED_FP(3);
- CASE_FIXED_FP(4);
- CASE_FIXED_FP(5);
-
- CASE_SAVED_FP(6);
- CASE_SAVED_FP(7);
- CASE_SAVED_FP(8);
- CASE_SAVED_FP(9);
- CASE_SAVED_FP(10);
- CASE_SAVED_FP(11);
-
- CASE_FIXED_FP(12);
- CASE_FIXED_FP(13);
- CASE_FIXED_FP(14);
- CASE_FIXED_FP(15);
- CASE_FIXED_FP(16);
- CASE_FIXED_FP(17);
- CASE_FIXED_FP(18);
- CASE_FIXED_FP(19);
- CASE_FIXED_FP(20);
- CASE_FIXED_FP(21);
- CASE_FIXED_FP(22);
- CASE_FIXED_FP(23);
- CASE_FIXED_FP(24);
- CASE_FIXED_FP(25);
- CASE_FIXED_FP(26);
- CASE_FIXED_FP(27);
- CASE_FIXED_FP(28);
- CASE_FIXED_FP(29);
- CASE_FIXED_FP(30);
- CASE_FIXED_FP(31);
- CASE_FIXED_FP(32);
- CASE_FIXED_FP(33);
- CASE_FIXED_FP(34);
- CASE_FIXED_FP(35);
- CASE_FIXED_FP(36);
- CASE_FIXED_FP(37);
- CASE_FIXED_FP(38);
- CASE_FIXED_FP(39);
- CASE_FIXED_FP(40);
- CASE_FIXED_FP(41);
- CASE_FIXED_FP(42);
- CASE_FIXED_FP(43);
- CASE_FIXED_FP(44);
- CASE_FIXED_FP(45);
- CASE_FIXED_FP(46);
- CASE_FIXED_FP(47);
- CASE_FIXED_FP(48);
- CASE_FIXED_FP(49);
- CASE_FIXED_FP(50);
- CASE_FIXED_FP(51);
- CASE_FIXED_FP(52);
- CASE_FIXED_FP(53);
- CASE_FIXED_FP(54);
- CASE_FIXED_FP(55);
- CASE_FIXED_FP(56);
- CASE_FIXED_FP(57);
- CASE_FIXED_FP(58);
- CASE_FIXED_FP(59);
- CASE_FIXED_FP(60);
- CASE_FIXED_FP(61);
- CASE_FIXED_FP(62);
- CASE_FIXED_FP(63);
- CASE_FIXED_FP(64);
- CASE_FIXED_FP(65);
- CASE_FIXED_FP(66);
- CASE_FIXED_FP(67);
- CASE_FIXED_FP(68);
- CASE_FIXED_FP(69);
- CASE_FIXED_FP(70);
- CASE_FIXED_FP(71);
- CASE_FIXED_FP(72);
- CASE_FIXED_FP(73);
- CASE_FIXED_FP(74);
- CASE_FIXED_FP(75);
- CASE_FIXED_FP(76);
- CASE_FIXED_FP(77);
- CASE_FIXED_FP(78);
- CASE_FIXED_FP(79);
- CASE_FIXED_FP(80);
- CASE_FIXED_FP(81);
- CASE_FIXED_FP(82);
- CASE_FIXED_FP(83);
- CASE_FIXED_FP(84);
- CASE_FIXED_FP(85);
- CASE_FIXED_FP(86);
- CASE_FIXED_FP(87);
- CASE_FIXED_FP(88);
- CASE_FIXED_FP(89);
- CASE_FIXED_FP(90);
- CASE_FIXED_FP(91);
- CASE_FIXED_FP(92);
- CASE_FIXED_FP(93);
- CASE_FIXED_FP(94);
- CASE_FIXED_FP(95);
- CASE_FIXED_FP(96);
- CASE_FIXED_FP(97);
- CASE_FIXED_FP(98);
- CASE_FIXED_FP(99);
- CASE_FIXED_FP(100);
- CASE_FIXED_FP(101);
- CASE_FIXED_FP(102);
- CASE_FIXED_FP(103);
- CASE_FIXED_FP(104);
- CASE_FIXED_FP(105);
- CASE_FIXED_FP(106);
- CASE_FIXED_FP(107);
- CASE_FIXED_FP(108);
- CASE_FIXED_FP(109);
- CASE_FIXED_FP(110);
- CASE_FIXED_FP(111);
- CASE_FIXED_FP(112);
- CASE_FIXED_FP(113);
- CASE_FIXED_FP(114);
- CASE_FIXED_FP(115);
- CASE_FIXED_FP(116);
- CASE_FIXED_FP(117);
- CASE_FIXED_FP(118);
- CASE_FIXED_FP(119);
- CASE_FIXED_FP(120);
- CASE_FIXED_FP(121);
- CASE_FIXED_FP(122);
- CASE_FIXED_FP(123);
- CASE_FIXED_FP(124);
- CASE_FIXED_FP(125);
- CASE_FIXED_FP(126);
- CASE_FIXED_FP(127);
- }
-#undef CASE_FIXED_FP
-#undef CASE_SAVED_FP
-}
-
-
-void
-setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
-{
- // Take floating register rotation into consideration
- ia64_fph_enable();
- if(regnum >= IA64_FIRST_ROTATING_FR)
- regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
-
-#define CASE_FIXED_FP(reg) \
- case reg: \
- ia64_ldf_fill(reg,fpval); \
- break
-#define CASE_RESTORED_FP(reg) \
- case reg: \
- regs->f##reg.u.bits[0] = fpval->u.bits[0]; \
- regs->f##reg.u.bits[1] = fpval->u.bits[1] ; \
- break
- switch(regnum) {
- CASE_FIXED_FP(2);
- CASE_FIXED_FP(3);
- CASE_FIXED_FP(4);
- CASE_FIXED_FP(5);
-
- CASE_RESTORED_FP(6);
- CASE_RESTORED_FP(7);
- CASE_RESTORED_FP(8);
- CASE_RESTORED_FP(9);
- CASE_RESTORED_FP(10);
- CASE_RESTORED_FP(11);
-
- CASE_FIXED_FP(12);
- CASE_FIXED_FP(13);
- CASE_FIXED_FP(14);
- CASE_FIXED_FP(15);
- CASE_FIXED_FP(16);
- CASE_FIXED_FP(17);
- CASE_FIXED_FP(18);
- CASE_FIXED_FP(19);
- CASE_FIXED_FP(20);
- CASE_FIXED_FP(21);
- CASE_FIXED_FP(22);
- CASE_FIXED_FP(23);
- CASE_FIXED_FP(24);
- CASE_FIXED_FP(25);
- CASE_FIXED_FP(26);
- CASE_FIXED_FP(27);
- CASE_FIXED_FP(28);
- CASE_FIXED_FP(29);
- CASE_FIXED_FP(30);
- CASE_FIXED_FP(31);
- CASE_FIXED_FP(32);
- CASE_FIXED_FP(33);
- CASE_FIXED_FP(34);
- CASE_FIXED_FP(35);
- CASE_FIXED_FP(36);
- CASE_FIXED_FP(37);
- CASE_FIXED_FP(38);
- CASE_FIXED_FP(39);
- CASE_FIXED_FP(40);
- CASE_FIXED_FP(41);
- CASE_FIXED_FP(42);
- CASE_FIXED_FP(43);
- CASE_FIXED_FP(44);
- CASE_FIXED_FP(45);
- CASE_FIXED_FP(46);
- CASE_FIXED_FP(47);
- CASE_FIXED_FP(48);
- CASE_FIXED_FP(49);
- CASE_FIXED_FP(50);
- CASE_FIXED_FP(51);
- CASE_FIXED_FP(52);
- CASE_FIXED_FP(53);
- CASE_FIXED_FP(54);
- CASE_FIXED_FP(55);
- CASE_FIXED_FP(56);
- CASE_FIXED_FP(57);
- CASE_FIXED_FP(58);
- CASE_FIXED_FP(59);
- CASE_FIXED_FP(60);
- CASE_FIXED_FP(61);
- CASE_FIXED_FP(62);
- CASE_FIXED_FP(63);
- CASE_FIXED_FP(64);
- CASE_FIXED_FP(65);
- CASE_FIXED_FP(66);
- CASE_FIXED_FP(67);
- CASE_FIXED_FP(68);
- CASE_FIXED_FP(69);
- CASE_FIXED_FP(70);
- CASE_FIXED_FP(71);
- CASE_FIXED_FP(72);
- CASE_FIXED_FP(73);
- CASE_FIXED_FP(74);
- CASE_FIXED_FP(75);
- CASE_FIXED_FP(76);
- CASE_FIXED_FP(77);
- CASE_FIXED_FP(78);
- CASE_FIXED_FP(79);
- CASE_FIXED_FP(80);
- CASE_FIXED_FP(81);
- CASE_FIXED_FP(82);
- CASE_FIXED_FP(83);
- CASE_FIXED_FP(84);
- CASE_FIXED_FP(85);
- CASE_FIXED_FP(86);
- CASE_FIXED_FP(87);
- CASE_FIXED_FP(88);
- CASE_FIXED_FP(89);
- CASE_FIXED_FP(90);
- CASE_FIXED_FP(91);
- CASE_FIXED_FP(92);
- CASE_FIXED_FP(93);
- CASE_FIXED_FP(94);
- CASE_FIXED_FP(95);
- CASE_FIXED_FP(96);
- CASE_FIXED_FP(97);
- CASE_FIXED_FP(98);
- CASE_FIXED_FP(99);
- CASE_FIXED_FP(100);
- CASE_FIXED_FP(101);
- CASE_FIXED_FP(102);
- CASE_FIXED_FP(103);
- CASE_FIXED_FP(104);
- CASE_FIXED_FP(105);
- CASE_FIXED_FP(106);
- CASE_FIXED_FP(107);
- CASE_FIXED_FP(108);
- CASE_FIXED_FP(109);
- CASE_FIXED_FP(110);
- CASE_FIXED_FP(111);
- CASE_FIXED_FP(112);
- CASE_FIXED_FP(113);
- CASE_FIXED_FP(114);
- CASE_FIXED_FP(115);
- CASE_FIXED_FP(116);
- CASE_FIXED_FP(117);
- CASE_FIXED_FP(118);
- CASE_FIXED_FP(119);
- CASE_FIXED_FP(120);
- CASE_FIXED_FP(121);
- CASE_FIXED_FP(122);
- CASE_FIXED_FP(123);
- CASE_FIXED_FP(124);
- CASE_FIXED_FP(125);
- CASE_FIXED_FP(126);
- CASE_FIXED_FP(127);
- }
-#undef CASE_FIXED_FP
-#undef CASE_RESTORED_FP
-}
-
-#endif /* XEN */
-
-
-#ifdef XEN
-void
-#else
-static void
-#endif
-getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
-{
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- unsigned long addr, *unat;
-
- if (regnum >= IA64_FIRST_STACKED_GR) {
- get_rse_reg(regs, regnum, val, nat);
- return;
- }
-
- /*
- * take care of r0 (read-only always evaluate to 0)
- */
- if (regnum == 0) {
- *val = 0;
- if (nat)
- *nat = 0;
- return;
- }
-
- /*
- * Now look at registers in [0-31] range and init correct UNAT
- */
- if (GR_IN_SW(regnum)) {
- addr = (unsigned long)sw;
- unat = &sw->ar_unat;
- } else {
- addr = (unsigned long)regs;
-#if defined(XEN)
- unat = &regs->eml_unat;
-#else
- unat = &sw->caller_unat;
-#endif
- }
-
- DPRINT("addr_base=%lx offset=0x%x\n", addr, GR_OFFS(regnum));
-
- addr += GR_OFFS(regnum);
-
- *val = *(unsigned long *)addr;
-
- /*
- * do it only when requested
- */
- if (nat)
- *nat = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
-}
-
-#ifndef XEN
-static void
-emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
-{
- /*
- * IMPORTANT:
- * Given the way we handle unaligned speculative loads, we should
- * not get to this point in the code but we keep this sanity check,
- * just in case.
- */
- if (ld.x6_op == 1 || ld.x6_op == 3) {
- printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
- die_if_kernel("unaligned reference on speculative load with register update\n",
- regs, 30);
- }
-
-
- /*
- * at this point, we know that the base register to update is valid i.e.,
- * it's not r0
- */
- if (type == UPD_IMMEDIATE) {
- unsigned long imm;
-
- /*
- * Load +Imm: ldXZ r1=[r3],imm(9)
- *
- *
- * form imm9: [13:19] contain the first 7 bits
- */
- imm = ld.x << 7 | ld.imm;
-
- /*
-		 * sign extend (1+8 bits) if m set
- */
- if (ld.m) imm |= SIGN_EXT9;
-
- /*
- * ifa == r3 and we know that the NaT bit on r3 was clear so
- * we can directly use ifa.
- */
- ifa += imm;
-
- setreg(ld.r3, ifa, 0, regs);
-
- DPRINT("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld.x, ld.m, imm, ifa);
-
- } else if (ld.m) {
- unsigned long r2;
- int nat_r2;
-
- /*
- * Load +Reg Opcode: ldXZ r1=[r3],r2
- *
- * Note: that we update r3 even in the case of ldfX.a
- * (where the load does not happen)
- *
- * The way the load algorithm works, we know that r3 does not
- * have its NaT bit set (would have gotten NaT consumption
- * before getting the unaligned fault). So we can use ifa
- * which equals r3 at this point.
- *
- * IMPORTANT:
- * The above statement holds ONLY because we know that we
- * never reach this code when trying to do a ldX.s.
-		 * If we ever make it to here on an ldfX.s then this assumption no longer holds.
- */
- getreg(ld.imm, &r2, &nat_r2, regs);
-
- ifa += r2;
-
- /*
- * propagate Nat r2 -> r3
- */
- setreg(ld.r3, ifa, nat_r2, regs);
-
- DPRINT("imm=%d r2=%ld r3=0x%lx nat_r2=%d\n",ld.imm, r2, ifa, nat_r2);
- }
-}
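-/*
- * Worked example (added): for an update form such as ld8 r1=[r3],-240
- * the 9-bit immediate is carried as m=1 (sign), x=0, imm7b=0x10. The
- * code above assembles imm = 0<<7 | 0x10 = 0x10 and, since m is set,
- * ORs in SIGN_EXT9 (ones in bits 63:8), giving 0xffffffffffffff10,
- * i.e. -240, which is then added to ifa to form the new r3.
- */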
-
-
-static int
-emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
-{
- unsigned int len = 1 << ld.x6_sz;
- unsigned long val = 0;
-
- /*
- * r0, as target, doesn't need to be checked because Illegal Instruction
- * faults have higher priority than unaligned faults.
- *
- * r0 cannot be found as the base as it would never generate an
- * unaligned reference.
- */
-
- /*
- * ldX.a we will emulate load and also invalidate the ALAT entry.
- * See comment below for explanation on how we handle ldX.a
- */
-
- if (len != 2 && len != 4 && len != 8) {
- DPRINT("unknown size: x6=%d\n", ld.x6_sz);
- return -1;
- }
- /* this assumes little-endian byte-order: */
- if (copy_from_user(&val, (void __user *) ifa, len))
- return -1;
- setreg(ld.r1, val, 0, regs);
-
- /*
- * check for updates on any kind of loads
- */
- if (ld.op == 0x5 || ld.m)
- emulate_load_updates(ld.op == 0x5 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
-
- /*
- * handling of various loads (based on EAS2.4):
- *
- * ldX.acq (ordered load):
- * - acquire semantics would have been used, so force fence instead.
- *
- * ldX.c.clr (check load and clear):
- * - if we get to this handler, it's because the entry was not in the ALAT.
- * Therefore the operation reverts to a normal load
- *
- * ldX.c.nc (check load no clear):
- * - same as previous one
- *
- * ldX.c.clr.acq (ordered check load and clear):
- * - same as above for c.clr part. The load needs to have acquire semantics. So
- * we use the fence semantics which is stronger and thus ensures correctness.
- *
- * ldX.a (advanced load):
- * - suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the
- * address doesn't match requested size alignment. This means that we would
- * possibly need more than one load to get the result.
- *
- * The load part can be handled just like a normal load, however the difficult
- * part is to get the right thing into the ALAT. The critical piece of information
-	 * is the base address of the load & its size. To do that, a ld.a must be executed,
- * clearly any address can be pushed into the table by using ld1.a r1=[r3]. Now
- * if we use the same target register, we will be okay for the check.a instruction.
- * If we look at the store, basically a stX [r3]=r1 checks the ALAT for any entry
-	 * which would overlap within [r3,r3+X] (the size of the load was stored in the
- * ALAT). If such an entry is found the entry is invalidated. But this is not good
- * enough, take the following example:
- * r3=3
- * ld4.a r1=[r3]
- *
- * Could be emulated by doing:
- * ld1.a r1=[r3],1
- * store to temporary;
- * ld1.a r1=[r3],1
- * store & shift to temporary;
- * ld1.a r1=[r3],1
- * store & shift to temporary;
- * ld1.a r1=[r3]
- * store & shift to temporary;
- * r1=temporary
- *
-	 * So in this case, you would get the right value in r1 but the wrong info in
- * the ALAT. Notice that you could do it in reverse to finish with address 3
- * but you would still get the size wrong. To get the size right, one needs to
-	 * execute exactly the same kind of load. You could do it from an aligned
- * temporary location, but you would get the address wrong.
- *
- * So no matter what, it is not possible to emulate an advanced load
- * correctly. But is that really critical ?
- *
- * We will always convert ld.a into a normal load with ALAT invalidated. This
- * will enable compiler to do optimization where certain code path after ld.a
- * is not required to have ld.c/chk.a, e.g., code path with no intervening stores.
- *
- * If there is a store after the advanced load, one must either do a ld.c.* or
- * chk.a.* to reuse the value stored in the ALAT. Both can "fail" (meaning no
- * entry found in ALAT), and that's perfectly ok because:
- *
- * - ld.c.*, if the entry is not present a normal load is executed
- * - chk.a.*, if the entry is not present, execution jumps to recovery code
- *
- * In either case, the load can be potentially retried in another form.
- *
- * ALAT must be invalidated for the register (so that chk.a or ld.c don't pick
- * up a stale entry later). The register base update MUST also be performed.
- */
-
- /*
- * when the load has the .acq completer then
- * use ordering fence.
- */
- if (ld.x6_op == 0x5 || ld.x6_op == 0xa)
- mb();
-
- /*
- * invalidate ALAT entry in case of advanced load
- */
- if (ld.x6_op == 0x2)
- invala_gr(ld.r1);
-
- return 0;
-}
-
-static int
-emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
-{
- unsigned long r2;
- unsigned int len = 1 << ld.x6_sz;
-
- /*
- * if we get to this handler, Nat bits on both r3 and r2 have already
- * been checked. so we don't need to do it
- *
- * extract the value to be stored
- */
- getreg(ld.imm, &r2, NULL, regs);
-
- /*
- * we rely on the macros in unaligned.h for now i.e.,
- * we let the compiler figure out how to read memory gracefully.
- *
- * We need this switch/case because the way the inline function
- * works. The code is optimized by the compiler and looks like
- * a single switch/case.
- */
- DPRINT("st%d [%lx]=%lx\n", len, ifa, r2);
-
- if (len != 2 && len != 4 && len != 8) {
- DPRINT("unknown size: x6=%d\n", ld.x6_sz);
- return -1;
- }
-
- /* this assumes little-endian byte-order: */
- if (copy_to_user((void __user *) ifa, &r2, len))
- return -1;
-
- /*
- * stX [r3]=r2,imm(9)
- *
- * NOTE:
- * ld.r3 can never be r0, because r0 would not generate an
- * unaligned access.
- */
- if (ld.op == 0x5) {
- unsigned long imm;
-
- /*
-		 * form imm9: [12:6] contain the first 7 bits
- */
- imm = ld.x << 7 | ld.r1;
- /*
-		 * sign extend (8 bits) if m set
- */
- if (ld.m) imm |= SIGN_EXT9;
- /*
- * ifa == r3 (NaT is necessarily cleared)
- */
- ifa += imm;
-
- DPRINT("imm=%lx r3=%lx\n", imm, ifa);
-
- setreg(ld.r3, ifa, 0, regs);
- }
- /*
- * we don't have alat_invalidate_multiple() so we need
- * to do the complete flush :-<<
- */
- ia64_invala();
-
- /*
- * stX.rel: use fence instead of release
- */
- if (ld.x6_op == 0xd)
- mb();
-
- return 0;
-}
-#endif /* XEN */
-
-/*
- * floating point operations sizes in bytes
- */
-static const unsigned char float_fsz[4]={
- 10, /* extended precision (e) */
- 8, /* integer (8) */
- 4, /* single precision (s) */
- 8 /* double precision (d) */
-};
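-/*
- * Added note: x6_sz indexes this table to pick the transfer size, so
- * ldfe (x6_sz=0) moves 10 bytes, ldf8 (1) and ldfd (3) move 8 bytes
- * each, and ldfs (2) moves 4 bytes.
- */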
-
-static inline void
-mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldfe(6, init);
- ia64_stop();
- ia64_stf_spill(final, 6);
-}
-
-static inline void
-mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldf8(6, init);
- ia64_stop();
- ia64_stf_spill(final, 6);
-}
-
-static inline void
-mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldfs(6, init);
- ia64_stop();
- ia64_stf_spill(final, 6);
-}
-
-static inline void
-mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldfd(6, init);
- ia64_stop();
- ia64_stf_spill(final, 6);
-}
-
-static inline void
-float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldf_fill(6, init);
- ia64_stop();
- ia64_stfe(final, 6);
-}
-
-static inline void
-float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldf_fill(6, init);
- ia64_stop();
- ia64_stf8(final, 6);
-}
-
-static inline void
-float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldf_fill(6, init);
- ia64_stop();
- ia64_stfs(final, 6);
-}
-
-static inline void
-float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
- ia64_ldf_fill(6, init);
- ia64_stop();
- ia64_stfd(final, 6);
-}
-
-#ifndef XEN
-static int
-emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
-{
- struct ia64_fpreg fpr_init[2];
- struct ia64_fpreg fpr_final[2];
- unsigned long len = float_fsz[ld.x6_sz];
-
- /*
- * fr0 & fr1 don't need to be checked because Illegal Instruction faults have
- * higher priority than unaligned faults.
- *
- * r0 cannot be found as the base as it would never generate an unaligned
- * reference.
- */
-
- /*
- * make sure we get clean buffers
- */
- memset(&fpr_init, 0, sizeof(fpr_init));
- memset(&fpr_final, 0, sizeof(fpr_final));
-
- /*
- * ldfpX.a: we don't try to emulate anything but we must
- * invalidate the ALAT entry and execute updates, if any.
- */
- if (ld.x6_op != 0x2) {
- /*
- * This assumes little-endian byte-order. Note that there is no "ldfpe"
- * instruction:
- */
- if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
- || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
- return -1;
-
- DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
-		DDUMP("fpr_init =", &fpr_init, 2*len);
- /*
- * XXX fixme
- * Could optimize inlines by using ldfpX & 2 spills
- */
- switch( ld.x6_sz ) {
- case 0:
- mem2float_extended(&fpr_init[0], &fpr_final[0]);
- mem2float_extended(&fpr_init[1], &fpr_final[1]);
- break;
- case 1:
- mem2float_integer(&fpr_init[0], &fpr_final[0]);
- mem2float_integer(&fpr_init[1], &fpr_final[1]);
- break;
- case 2:
- mem2float_single(&fpr_init[0], &fpr_final[0]);
- mem2float_single(&fpr_init[1], &fpr_final[1]);
- break;
- case 3:
- mem2float_double(&fpr_init[0], &fpr_final[0]);
- mem2float_double(&fpr_init[1], &fpr_final[1]);
- break;
- }
- DDUMP("fpr_final =", &fpr_final, 2*len);
- /*
- * XXX fixme
- *
- * A possible optimization would be to drop fpr_final and directly
- * use the storage from the saved context i.e., the actual final
- * destination (pt_regs, switch_stack or thread structure).
- */
- setfpreg(ld.r1, &fpr_final[0], regs);
- setfpreg(ld.imm, &fpr_final[1], regs);
- }
-
- /*
- * Check for updates: only immediate updates are available for this
- * instruction.
- */
- if (ld.m) {
- /*
- * the immediate is implicit given the ldsz of the operation:
- * single: 8 (2x4) and for all others it's 16 (2x8)
- */
- ifa += len<<1;
-		ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
-
-		DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);
- * the fact that we force the NaT of r3 to zero is ONLY valid
- * as long as we don't come here with a ldfpX.s.
- * For this reason we keep this sanity check
- */
- if (ld.x6_op == 1 || ld.x6_op == 3)
- printk(KERN_ERR "%s: register update on speculative load pair, error\n",
- __FUNCTION__);
-
- setreg(ld.r3, ifa, 0, regs);
- }
-
- /*
- * Invalidate ALAT entries, if any, for both registers.
- */
- if (ld.x6_op == 0x2) {
- invala_fr(ld.r1);
- invala_fr(ld.imm);
- }
- return 0;
-}
-
-
-static int
-emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
-{
- struct ia64_fpreg fpr_init;
- struct ia64_fpreg fpr_final;
- unsigned long len = float_fsz[ld.x6_sz];
-
- /*
- * fr0 & fr1 don't need to be checked because Illegal Instruction
- * faults have higher priority than unaligned faults.
- *
- * r0 cannot be found as the base as it would never generate an
- * unaligned reference.
- */
-
- /*
- * make sure we get clean buffers
- */
- memset(&fpr_init,0, sizeof(fpr_init));
- memset(&fpr_final,0, sizeof(fpr_final));
-
- /*
- * ldfX.a we don't try to emulate anything but we must
- * invalidate the ALAT entry.
- * See comments in ldX for descriptions on how the various loads are handled.
- */
- if (ld.x6_op != 0x2) {
- if (copy_from_user(&fpr_init, (void __user *) ifa, len))
- return -1;
-
- DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
- DDUMP("fpr_init =", &fpr_init, len);
- /*
- * we only do something for x6_op={0,8,9}
- */
- switch( ld.x6_sz ) {
- case 0:
- mem2float_extended(&fpr_init, &fpr_final);
- break;
- case 1:
- mem2float_integer(&fpr_init, &fpr_final);
- break;
- case 2:
- mem2float_single(&fpr_init, &fpr_final);
- break;
- case 3:
- mem2float_double(&fpr_init, &fpr_final);
- break;
- }
- DDUMP("fpr_final =", &fpr_final, len);
- /*
- * XXX fixme
- *
- * A possible optimization would be to drop fpr_final and directly
- * use the storage from the saved context i.e., the actual final
- * destination (pt_regs, switch_stack or thread structure).
- */
- setfpreg(ld.r1, &fpr_final, regs);
- }
-
- /*
- * check for updates on any loads
- */
- if (ld.op == 0x7 || ld.m)
- emulate_load_updates(ld.op == 0x7 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
-
- /*
- * invalidate ALAT entry in case of advanced floating point loads
- */
- if (ld.x6_op == 0x2)
- invala_fr(ld.r1);
-
- return 0;
-}
-
-
-static int
-emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
-{
- struct ia64_fpreg fpr_init;
- struct ia64_fpreg fpr_final;
- unsigned long len = float_fsz[ld.x6_sz];
-
- /*
- * make sure we get clean buffers
- */
- memset(&fpr_init,0, sizeof(fpr_init));
- memset(&fpr_final,0, sizeof(fpr_final));
-
- /*
- * if we get to this handler, Nat bits on both r3 and r2 have already
- * been checked. so we don't need to do it
- *
- * extract the value to be stored
- */
- getfpreg(ld.imm, &fpr_init, regs);
- /*
- * during this step, we extract the spilled registers from the saved
- * context i.e., we refill. Then we store (no spill) to temporary
- * aligned location
- */
- switch( ld.x6_sz ) {
- case 0:
- float2mem_extended(&fpr_init, &fpr_final);
- break;
- case 1:
- float2mem_integer(&fpr_init, &fpr_final);
- break;
- case 2:
- float2mem_single(&fpr_init, &fpr_final);
- break;
- case 3:
- float2mem_double(&fpr_init, &fpr_final);
- break;
- }
- DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
- DDUMP("fpr_init =", &fpr_init, len);
- DDUMP("fpr_final =", &fpr_final, len);
-
- if (copy_to_user((void __user *) ifa, &fpr_final, len))
- return -1;
-
- /*
- * stfX [r3]=r2,imm(9)
- *
- * NOTE:
- * ld.r3 can never be r0, because r0 would not generate an
- * unaligned access.
- */
- if (ld.op == 0x7) {
- unsigned long imm;
-
- /*
-		 * form imm9: [12:6] contain the first 7 bits
- */
- imm = ld.x << 7 | ld.r1;
- /*
-		 * sign extend (8 bits) if m set
- */
- if (ld.m)
- imm |= SIGN_EXT9;
- /*
- * ifa == r3 (NaT is necessarily cleared)
- */
- ifa += imm;
-
- DPRINT("imm=%lx r3=%lx\n", imm, ifa);
-
- setreg(ld.r3, ifa, 0, regs);
- }
- /*
- * we don't have alat_invalidate_multiple() so we need
- * to do the complete flush :-<<
- */
- ia64_invala();
-
- return 0;
-}
-
-/*
- * Make sure we log the unaligned access, so that user/sysadmin can notice it and
- * eventually fix the program. However, we don't want to do that for every access so we
- * pace it with jiffies. This isn't really MP-safe, but it doesn't really have to be
- * either...
- */
-static int
-within_logging_rate_limit (void)
-{
- static unsigned long count, last_time;
-
- if (jiffies - last_time > 5*HZ)
- count = 0;
- if (++count < 5) {
- last_time = jiffies;
- return 1;
- }
- return 0;
-
-}
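-/*
- * Added note: as written this admits a burst of up to four logged
- * messages; further ones are dropped until 5*HZ jiffies have elapsed
- * since the last *logged* message, at which point count resets and a
- * new burst is allowed. last_time is only advanced on logged messages.
- */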
-#endif /* XEN */
-
-void
-ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
-{
-#ifdef XEN
-	printk("ia64_handle_unaligned: called, not working yet\n");
-#else
- struct ia64_psr *ipsr = ia64_psr(regs);
- mm_segment_t old_fs = get_fs();
- unsigned long bundle[2];
- unsigned long opcode;
- struct siginfo si;
- const struct exception_table_entry *eh = NULL;
- union {
- unsigned long l;
- load_store_t insn;
- } u;
- int ret = -1;
-
- if (ia64_psr(regs)->be) {
- /* we don't support big-endian accesses */
- die_if_kernel("big-endian unaligned accesses are not supported", regs, 0);
- goto force_sigbus;
- }
-
- /*
- * Treat kernel accesses for which there is an exception handler entry the same as
- * user-level unaligned accesses. Otherwise, a clever program could trick this
-	 * handler into reading arbitrary kernel addresses...
- */
- if (!user_mode(regs))
- eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
- if (user_mode(regs) || eh) {
- if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
- goto force_sigbus;
-
- if (!(current->thread.flags & IA64_THREAD_UAC_NOPRINT)
- && within_logging_rate_limit())
- {
- char buf[200]; /* comm[] is at most 16 bytes... */
- size_t len;
-
- len = snprintf(buf, sizeof(buf), "%s(%d): unaligned access to 0x%016lx, "
- "ip=0x%016lx\n\r", current->comm, current->pid,
- ifa, regs->cr_iip + ipsr->ri);
- /*
- * Don't call tty_write_message() if we're in the kernel; we might
- * be holding locks...
- */
- if (user_mode(regs))
- tty_write_message(current->signal->tty, buf);
- buf[len-1] = '\0'; /* drop '\r' */
- printk(KERN_WARNING "%s", buf); /* watch for command names containing %s */
- }
- } else {
- if (within_logging_rate_limit())
- printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
- ifa, regs->cr_iip + ipsr->ri);
- set_fs(KERNEL_DS);
- }
-
- DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
- regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
-
- if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
- goto failure;
-
- /*
- * extract the instruction from the bundle given the slot number
- */
- switch (ipsr->ri) {
- case 0: u.l = (bundle[0] >> 5); break;
- case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break;
- case 2: u.l = (bundle[1] >> 23); break;
- }
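-	/*
-	 * Added note: a bundle is 128 bits, a 5-bit template plus three
-	 * 41-bit slots. Slot 0 occupies bits 45:5 of the low word, slot 1
-	 * straddles the two words (18 bits from the top of bundle[0] plus
-	 * 23 bits from bundle[1]), and slot 2 is bits 127:87, which is
-	 * exactly what the three shift expressions above extract.
-	 */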
- opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK;
-
- DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d "
- "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm,
- u.insn.r3, u.insn.x, u.insn.hint, u.insn.x6_sz, u.insn.m, u.insn.op);
-
- /*
- * IMPORTANT:
- * Notice that the switch statement DOES not cover all possible instructions
-	 * that DO generate unaligned references. This is deliberate because for some
-	 * instructions it DOES NOT make sense to try and emulate the access. Sometimes it
-	 * is WRONG to try and emulate. Here is a list of instructions we don't emulate i.e.,
- * the program will get a signal and die:
- *
- * load/store:
- * - ldX.spill
- * - stX.spill
- * Reason: RNATs are based on addresses
- * - ld16
- * - st16
- * Reason: ld16 and st16 are supposed to occur in a single
- * memory op
- *
- * synchronization:
- * - cmpxchg
- * - fetchadd
- * - xchg
- * Reason: ATOMIC operations cannot be emulated properly using multiple
- * instructions.
- *
- * speculative loads:
- * - ldX.sZ
- * Reason: side effects, code must be ready to deal with failure so simpler
- * to let the load fail.
- * ---------------------------------------------------------------------------------
- * XXX fixme
- *
- * I would like to get rid of this switch case and do something
- * more elegant.
- */
- switch (opcode) {
- case LDS_OP:
- case LDSA_OP:
- if (u.insn.x)
- /* oops, really a semaphore op (cmpxchg, etc) */
- goto failure;
- /* no break */
- case LDS_IMM_OP:
- case LDSA_IMM_OP:
- case LDFS_OP:
- case LDFSA_OP:
- case LDFS_IMM_OP:
- /*
- * The instruction will be retried with deferred exceptions turned on, and
- * we should get Nat bit installed
- *
- * IMPORTANT: When PSR_ED is set, the register & immediate update forms
- * are actually executed even though the operation failed. So we don't
- * need to take care of this.
- */
- DPRINT("forcing PSR_ED\n");
- regs->cr_ipsr |= IA64_PSR_ED;
- goto done;
-
- case LD_OP:
- case LDA_OP:
- case LDBIAS_OP:
- case LDACQ_OP:
- case LDCCLR_OP:
- case LDCNC_OP:
- case LDCCLRACQ_OP:
- if (u.insn.x)
- /* oops, really a semaphore op (cmpxchg, etc) */
- goto failure;
- /* no break */
- case LD_IMM_OP:
- case LDA_IMM_OP:
- case LDBIAS_IMM_OP:
- case LDACQ_IMM_OP:
- case LDCCLR_IMM_OP:
- case LDCNC_IMM_OP:
- case LDCCLRACQ_IMM_OP:
- ret = emulate_load_int(ifa, u.insn, regs);
- break;
-
- case ST_OP:
- case STREL_OP:
- if (u.insn.x)
- /* oops, really a semaphore op (cmpxchg, etc) */
- goto failure;
- /* no break */
- case ST_IMM_OP:
- case STREL_IMM_OP:
- ret = emulate_store_int(ifa, u.insn, regs);
- break;
-
- case LDF_OP:
- case LDFA_OP:
- case LDFCCLR_OP:
- case LDFCNC_OP:
- case LDF_IMM_OP:
- case LDFA_IMM_OP:
- case LDFCCLR_IMM_OP:
- case LDFCNC_IMM_OP:
- if (u.insn.x)
- ret = emulate_load_floatpair(ifa, u.insn, regs);
- else
- ret = emulate_load_float(ifa, u.insn, regs);
- break;
-
- case STF_OP:
- case STF_IMM_OP:
- ret = emulate_store_float(ifa, u.insn, regs);
- break;
-
- default:
- goto failure;
- }
- DPRINT("ret=%d\n", ret);
- if (ret)
- goto failure;
-
- if (ipsr->ri == 2)
- /*
- * given today's architecture this case is not likely to happen because a
- * memory access instruction (M) can never be in the last slot of a
- * bundle. But let's keep it for now.
- */
- regs->cr_iip += 16;
- ipsr->ri = (ipsr->ri + 1) & 0x3;
-
- DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
- done:
- set_fs(old_fs); /* restore original address limit */
- return;
-
- failure:
- /* something went wrong... */
- if (!user_mode(regs)) {
- if (eh) {
- ia64_handle_exception(regs, eh);
- goto done;
- }
- die_if_kernel("error during unaligned kernel access\n", regs, ret);
- /* NOT_REACHED */
- }
- force_sigbus:
- si.si_signo = SIGBUS;
- si.si_errno = 0;
- si.si_code = BUS_ADRALN;
- si.si_addr = (void __user *) ifa;
- si.si_flags = 0;
- si.si_isr = 0;
- si.si_imm = 0;
- force_sig_info(SIGBUS, &si, current);
- goto done;
-#endif
-}
diff --git a/xen/arch/ia64/linux-xen/unwind.c b/xen/arch/ia64/linux-xen/unwind.c
deleted file mode 100644
index 469c3bbc1f..0000000000
--- a/xen/arch/ia64/linux-xen/unwind.c
+++ /dev/null
@@ -1,2393 +0,0 @@
-/*
- * Copyright (C) 1999-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
- * - Change pt_regs_off() to make it less dependent on pt_regs structure.
- */
-/*
- * This file implements call frame unwind support for the Linux
- * kernel. Parsing and processing the unwind information is
- * time-consuming, so this implementation translates the unwind
- * descriptors into unwind scripts. These scripts are very simple
- * (basically a sequence of assignments) and efficient to execute.
- * They are cached for later re-use. Each script is specific for a
- * given instruction pointer address and the set of predicate values
- * that the script depends on (most unwind descriptors are
- * unconditional and scripts often do not depend on predicates at
- * all). This code is based on the unwind conventions described in
- * the "IA-64 Software Conventions and Runtime Architecture" manual.
- *
- * SMP conventions:
- * o updates to the global unwind data (in structure "unw") are serialized
- * by the unw.lock spinlock
- * o each unwind script has its own read-write lock; a thread must acquire
- * a read lock before executing a script and must acquire a write lock
- * before modifying a script
- * o if both the unw.lock spinlock and a script's read-write lock must be
- * acquired, then the read-write lock must be acquired first.
- */
-#ifdef XEN
-#include <xen/types.h>
-#include <xen/elf.h>
-#include <xen/kernel.h>
-#include <xen/sched.h>
-#include <xen/xmalloc.h>
-#include <xen/spinlock.h>
-#include <xen/errno.h>
-
-// Workaround: write_trylock() performs a bug check, but the stack
-// unwinder can be called in subtle situations, so skip the bug check
-// here.
-#undef write_trylock
-#ifdef CONFIG_SMP
-#define write_trylock(lock) _raw_write_trylock(lock)
-#else
-#define write_trylock(lock) ({1;})
-#endif
-
-#else
-#include <linux/module.h>
-#include <linux/bootmem.h>
-#include <linux/elf.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#endif
-
-#include <asm/unwind.h>
-
-#include <asm/delay.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/ptrace_offsets.h>
-#include <asm/rse.h>
-#include <asm/sections.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#include "entry.h"
-#include "unwind_i.h"
-
-#define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
-#define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
-
-#define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
-#define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
-
-#define UNW_STATS	0	/* WARNING: this disables interrupts for long time-spans!! */
-
-#ifdef UNW_DEBUG
- static unsigned int unw_debug_level = UNW_DEBUG;
-# define UNW_DEBUG_ON(n) unw_debug_level >= n
- /* Do not code a printk level, not all debug lines end in newline */
-# define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
-# define inline
-#else /* !UNW_DEBUG */
-# define UNW_DEBUG_ON(n) 0
-# define UNW_DPRINT(n, ...)
-#endif /* UNW_DEBUG */
-
-#if UNW_STATS
-# define STAT(x...) x
-#else
-# define STAT(x...)
-#endif
-
-#ifdef XEN
-#define alloc_reg_state() ({in_irq()? NULL: xmalloc(struct unw_reg_state);})
-#define free_reg_state(usr) xfree(usr)
-#define alloc_labeled_state() ({in_irq()? NULL: xmalloc(struct unw_labeled_state);})
-#define free_labeled_state(usr) xfree(usr)
-#else
-#define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
-#define free_reg_state(usr) kfree(usr)
-#define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
-#define free_labeled_state(usr) kfree(usr)
-#endif
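-/*
- * Added note: under Xen these helpers deliberately return NULL from
- * interrupt context, presumably because xmalloc() is not safe to call
- * there; the unwinder's callers therefore have to cope with state
- * allocation failing mid-unwind.
- */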
-
-typedef unsigned long unw_word;
-typedef unsigned char unw_hash_index_t;
-
-static struct {
- spinlock_t lock; /* spinlock for unwind data */
-
- /* list of unwind tables (one per load-module) */
- struct unw_table *tables;
-
- unsigned long r0; /* constant 0 for r0 */
-
- /* table of registers that prologues can save (and order in which they're saved): */
- const unsigned char save_order[8];
-
- /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
- unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
-
-	unsigned short lru_head;	/* index of least-recently used script */
- unsigned short lru_tail; /* index of most-recently used script */
-
- /* index into unw_frame_info for preserved register i */
- unsigned short preg_index[UNW_NUM_REGS];
-
- short pt_regs_offsets[32];
-
- /* unwind table for the kernel: */
- struct unw_table kernel_table;
-
- /* unwind table describing the gate page (kernel code that is mapped into user space): */
- size_t gate_table_size;
- unsigned long *gate_table;
-
- /* hash table that maps instruction pointer to script index: */
- unsigned short hash[UNW_HASH_SIZE];
-
- /* script cache: */
- struct unw_script cache[UNW_CACHE_SIZE];
-
-# ifdef UNW_DEBUG
- const char *preg_name[UNW_NUM_REGS];
-# endif
-# if UNW_STATS
- struct {
- struct {
- int lookups;
- int hinted_hits;
- int normal_hits;
- int collision_chain_traversals;
- } cache;
- struct {
- unsigned long build_time;
- unsigned long run_time;
- unsigned long parse_time;
- int builds;
- int news;
- int collisions;
- int runs;
- } script;
- struct {
- unsigned long init_time;
- unsigned long unwind_time;
- int inits;
- int unwinds;
- } api;
- } stat;
-# endif
-} unw = {
- .tables = &unw.kernel_table,
- .lock = SPIN_LOCK_UNLOCKED,
- .save_order = {
- UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
- UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
- },
- .preg_index = {
- offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
- offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
- offsetof(struct unw_frame_info, bsp_loc)/8,
- offsetof(struct unw_frame_info, bspstore_loc)/8,
- offsetof(struct unw_frame_info, pfs_loc)/8,
- offsetof(struct unw_frame_info, rnat_loc)/8,
- offsetof(struct unw_frame_info, psp)/8,
- offsetof(struct unw_frame_info, rp_loc)/8,
- offsetof(struct unw_frame_info, r4)/8,
- offsetof(struct unw_frame_info, r5)/8,
- offsetof(struct unw_frame_info, r6)/8,
- offsetof(struct unw_frame_info, r7)/8,
- offsetof(struct unw_frame_info, unat_loc)/8,
- offsetof(struct unw_frame_info, pr_loc)/8,
- offsetof(struct unw_frame_info, lc_loc)/8,
- offsetof(struct unw_frame_info, fpsr_loc)/8,
- offsetof(struct unw_frame_info, b1_loc)/8,
- offsetof(struct unw_frame_info, b2_loc)/8,
- offsetof(struct unw_frame_info, b3_loc)/8,
- offsetof(struct unw_frame_info, b4_loc)/8,
- offsetof(struct unw_frame_info, b5_loc)/8,
- offsetof(struct unw_frame_info, f2_loc)/8,
- offsetof(struct unw_frame_info, f3_loc)/8,
- offsetof(struct unw_frame_info, f4_loc)/8,
- offsetof(struct unw_frame_info, f5_loc)/8,
- offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
- offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
- },
- .pt_regs_offsets = {
- [0] = -1,
- offsetof(struct pt_regs, r1),
- offsetof(struct pt_regs, r2),
- offsetof(struct pt_regs, r3),
- [4] = -1, [5] = -1, [6] = -1, [7] = -1,
- offsetof(struct pt_regs, r8),
- offsetof(struct pt_regs, r9),
- offsetof(struct pt_regs, r10),
- offsetof(struct pt_regs, r11),
- offsetof(struct pt_regs, r12),
- offsetof(struct pt_regs, r13),
- offsetof(struct pt_regs, r14),
- offsetof(struct pt_regs, r15),
- offsetof(struct pt_regs, r16),
- offsetof(struct pt_regs, r17),
- offsetof(struct pt_regs, r18),
- offsetof(struct pt_regs, r19),
- offsetof(struct pt_regs, r20),
- offsetof(struct pt_regs, r21),
- offsetof(struct pt_regs, r22),
- offsetof(struct pt_regs, r23),
- offsetof(struct pt_regs, r24),
- offsetof(struct pt_regs, r25),
- offsetof(struct pt_regs, r26),
- offsetof(struct pt_regs, r27),
- offsetof(struct pt_regs, r28),
- offsetof(struct pt_regs, r29),
- offsetof(struct pt_regs, r30),
- offsetof(struct pt_regs, r31),
- },
- .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
-#ifdef UNW_DEBUG
- .preg_name = {
- "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
- "r4", "r5", "r6", "r7",
- "ar.unat", "pr", "ar.lc", "ar.fpsr",
- "b1", "b2", "b3", "b4", "b5",
- "f2", "f3", "f4", "f5",
- "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
- "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
- }
-#endif
-};
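-/*
- * Added note: the -1 entries in pt_regs_offsets mark r0 (hardwired to
- * zero) and the preserved registers r4-r7, which are not saved in
- * pt_regs at all; pt_regs_off() below treats a negative offset as an
- * error and falls back to offset 0.
- */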
-
-static inline int
-read_only (void *addr)
-{
- return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
-}
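-/*
- * Added note: unw.r0 is the only read-only slot handed out, so this
- * check is effectively addr == &unw.r0. The unsigned subtraction folds
- * the lower and upper bound tests into one compare, because any addr
- * below &unw.r0 wraps around to a huge unsigned value.
- */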
-
-/*
- * Returns offset of rREG in struct pt_regs.
- */
-static inline unsigned long
-pt_regs_off (unsigned long reg)
-{
- short off = -1;
-
- if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
- off = unw.pt_regs_offsets[reg];
-
- if (off < 0) {
- UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
- off = 0;
- }
- return (unsigned long) off;
-}
-
-static inline struct pt_regs *
-get_scratch_regs (struct unw_frame_info *info)
-{
- if (!info->pt) {
- /* This should not happen with valid unwind info. */
- UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
- if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
- info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
- else
- info->pt = info->sp - 16;
- }
- UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
- return (struct pt_regs *) info->pt;
-}
-
-/* Unwind accessors. */
-
-int
-unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
-{
- unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
- struct unw_ireg *ireg;
- struct pt_regs *pt;
-
- if ((unsigned) regnum - 1 >= 127) {
- if (regnum == 0 && !write) {
- *val = 0; /* read r0 always returns 0 */
- *nat = 0;
- return 0;
- }
- UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
- __FUNCTION__, regnum);
- return -1;
- }
-
- if (regnum < 32) {
- if (regnum >= 4 && regnum <= 7) {
- /* access a preserved register */
- ireg = &info->r4 + (regnum - 4);
- addr = ireg->loc;
- if (addr) {
- nat_addr = addr + ireg->nat.off;
- switch (ireg->nat.type) {
- case UNW_NAT_VAL:
- /* simulate getf.sig/setf.sig */
- if (write) {
- if (*nat) {
- /* write NaTVal and be done with it */
- addr[0] = 0;
- addr[1] = 0x1fffe;
- return 0;
- }
- addr[1] = 0x1003e;
- } else {
-					if (addr[0] == 0 && addr[1] == 0x1fffe) {
- /* return NaT and be done with it */
- *val = 0;
- *nat = 1;
- return 0;
- }
- }
- /* fall through */
- case UNW_NAT_NONE:
- dummy_nat = 0;
- nat_addr = &dummy_nat;
- break;
-
- case UNW_NAT_MEMSTK:
- nat_mask = (1UL << ((long) addr & 0x1f8)/8);
- break;
-
- case UNW_NAT_REGSTK:
- nat_addr = ia64_rse_rnat_addr(addr);
- if ((unsigned long) addr < info->regstk.limit
- || (unsigned long) addr >= info->regstk.top)
- {
- UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
- "[0x%lx-0x%lx)\n",
- __FUNCTION__, (void *) addr,
- info->regstk.limit,
- info->regstk.top);
- return -1;
- }
- if ((unsigned long) nat_addr >= info->regstk.top)
- nat_addr = &info->sw->ar_rnat;
- nat_mask = (1UL << ia64_rse_slot_num(addr));
- break;
- }
- } else {
- addr = &info->sw->r4 + (regnum - 4);
- nat_addr = &info->sw->ar_unat;
- nat_mask = (1UL << ((long) addr & 0x1f8)/8);
- }
- } else {
- /* access a scratch register */
- pt = get_scratch_regs(info);
- addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
- if (info->pri_unat_loc)
- nat_addr = info->pri_unat_loc;
- else
- nat_addr = &info->sw->caller_unat;
- nat_mask = (1UL << ((long) addr & 0x1f8)/8);
- }
- } else {
- /* access a stacked register */
- addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
- nat_addr = ia64_rse_rnat_addr(addr);
- if ((unsigned long) addr < info->regstk.limit
- || (unsigned long) addr >= info->regstk.top)
- {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
- "of rbs\n", __FUNCTION__);
- return -1;
- }
- if ((unsigned long) nat_addr >= info->regstk.top)
- nat_addr = &info->sw->ar_rnat;
- nat_mask = (1UL << ia64_rse_slot_num(addr));
- }
-
- if (write) {
- if (read_only(addr)) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __FUNCTION__);
- } else {
- *addr = *val;
- if (*nat)
- *nat_addr |= nat_mask;
- else
- *nat_addr &= ~nat_mask;
- }
- } else {
- if ((*nat_addr & nat_mask) == 0) {
- *val = *addr;
- *nat = 0;
- } else {
- *val = 0; /* if register is a NaT, *addr may contain kernel data! */
- *nat = 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(unw_access_gr);
-
-int
-unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
-{
- unsigned long *addr;
- struct pt_regs *pt;
-
- switch (regnum) {
- /* scratch: */
- case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
- case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
- case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
-
- /* preserved: */
- case 1: case 2: case 3: case 4: case 5:
- addr = *(&info->b1_loc + (regnum - 1));
- if (!addr)
- addr = &info->sw->b1 + (regnum - 1);
- break;
-
- default:
- UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
- __FUNCTION__, regnum);
- return -1;
- }
- if (write)
- if (read_only(addr)) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __FUNCTION__);
- } else
- *addr = *val;
- else
- *val = *addr;
- return 0;
-}
-EXPORT_SYMBOL(unw_access_br);
-
-int
-unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
-{
- struct ia64_fpreg *addr = NULL;
- struct pt_regs *pt;
-
- if ((unsigned) (regnum - 2) >= 126) {
- UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
- __FUNCTION__, regnum);
- return -1;
- }
-
- if (regnum <= 5) {
- addr = *(&info->f2_loc + (regnum - 2));
- if (!addr)
- addr = &info->sw->f2 + (regnum - 2);
- } else if (regnum <= 15) {
- if (regnum <= 11) {
- pt = get_scratch_regs(info);
-			//XXX struct ia64_fpreg and struct pt_fpreg are the same.
- addr = (struct ia64_fpreg*)(&pt->f6 + (regnum - 6));
- }
- else
- addr = &info->sw->f12 + (regnum - 12);
- } else if (regnum <= 31) {
- addr = info->fr_loc[regnum - 16];
- if (!addr)
- addr = &info->sw->f16 + (regnum - 16);
- } else {
- struct task_struct *t = info->task;
-
- if (write)
- ia64_sync_fph(t);
- else
- ia64_flush_fph(t);
-#ifdef XEN
- addr = t->arch._thread.fph + (regnum - 32);
-#else
- addr = t->thread.fph + (regnum - 32);
-#endif
- }
-
- if (write)
- if (read_only(addr)) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __FUNCTION__);
- } else
- *addr = *val;
- else
- *val = *addr;
- return 0;
-}
-EXPORT_SYMBOL(unw_access_fr);
-
-int
-unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
-{
- unsigned long *addr;
- struct pt_regs *pt;
-
- switch (regnum) {
- case UNW_AR_BSP:
- addr = info->bsp_loc;
- if (!addr)
- addr = &info->sw->ar_bspstore;
- break;
-
- case UNW_AR_BSPSTORE:
- addr = info->bspstore_loc;
- if (!addr)
- addr = &info->sw->ar_bspstore;
- break;
-
- case UNW_AR_PFS:
- addr = info->pfs_loc;
- if (!addr)
- addr = &info->sw->ar_pfs;
- break;
-
- case UNW_AR_RNAT:
- addr = info->rnat_loc;
- if (!addr)
- addr = &info->sw->ar_rnat;
- break;
-
- case UNW_AR_UNAT:
- addr = info->unat_loc;
- if (!addr)
- addr = &info->sw->caller_unat;
- break;
-
- case UNW_AR_LC:
- addr = info->lc_loc;
- if (!addr)
- addr = &info->sw->ar_lc;
- break;
-
- case UNW_AR_EC:
- if (!info->cfm_loc)
- return -1;
- if (write)
- *info->cfm_loc =
- (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
- else
- *val = (*info->cfm_loc >> 52) & 0x3f;
- return 0;
-
- case UNW_AR_FPSR:
- addr = info->fpsr_loc;
- if (!addr)
- addr = &info->sw->ar_fpsr;
- break;
-
- case UNW_AR_RSC:
- pt = get_scratch_regs(info);
- addr = &pt->ar_rsc;
- break;
-
- case UNW_AR_CCV:
- pt = get_scratch_regs(info);
- addr = &pt->ar_ccv;
- break;
-
- case UNW_AR_CSD:
- pt = get_scratch_regs(info);
- addr = &pt->ar_csd;
- break;
-
- case UNW_AR_SSD:
- pt = get_scratch_regs(info);
- addr = &pt->ar_ssd;
- break;
-
- default:
- UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
- __FUNCTION__, regnum);
- return -1;
- }
-
- if (write) {
- if (read_only(addr)) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __FUNCTION__);
- } else
- *addr = *val;
- } else
- *val = *addr;
- return 0;
-}
-EXPORT_SYMBOL(unw_access_ar);
-
-int
-unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
-{
- unsigned long *addr;
-
- addr = info->pr_loc;
- if (!addr)
- addr = &info->sw->pr;
-
- if (write) {
- if (read_only(addr)) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
- __FUNCTION__);
- } else
- *addr = *val;
- } else
- *val = *addr;
- return 0;
-}
-EXPORT_SYMBOL(unw_access_pr);
-
-
-/* Routines to manipulate the state stack. */
-
-static inline void
-push (struct unw_state_record *sr)
-{
- struct unw_reg_state *rs;
-
- rs = alloc_reg_state();
- if (!rs) {
- printk(KERN_ERR "unwind: cannot stack reg state!\n");
- return;
- }
- memcpy(rs, &sr->curr, sizeof(*rs));
- sr->curr.next = rs;
-}
-
-static void
-pop (struct unw_state_record *sr)
-{
- struct unw_reg_state *rs = sr->curr.next;
-
- if (!rs) {
- printk(KERN_ERR "unwind: stack underflow!\n");
- return;
- }
- memcpy(&sr->curr, rs, sizeof(*rs));
- free_reg_state(rs);
-}
-
-/* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
-static struct unw_reg_state *
-dup_state_stack (struct unw_reg_state *rs)
-{
- struct unw_reg_state *copy, *prev = NULL, *first = NULL;
-
- while (rs) {
- copy = alloc_reg_state();
- if (!copy) {
- printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
- return NULL;
- }
- memcpy(copy, rs, sizeof(*copy));
- if (first)
- prev->next = copy;
- else
- first = copy;
- rs = rs->next;
- prev = copy;
- }
- return first;
-}
-
-/* Free all stacked register states (but not RS itself). */
-static void
-free_state_stack (struct unw_reg_state *rs)
-{
- struct unw_reg_state *p, *next;
-
- for (p = rs->next; p != NULL; p = next) {
- next = p->next;
- free_reg_state(p);
- }
- rs->next = NULL;
-}
-
-/* Unwind decoder routines */
-
-static enum unw_register_index __attribute_const__
-decode_abreg (unsigned char abreg, int memory)
-{
- switch (abreg) {
- case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
- case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
- case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
- case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
- case 0x60: return UNW_REG_PR;
- case 0x61: return UNW_REG_PSP;
- case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
- case 0x63: return UNW_REG_RP;
- case 0x64: return UNW_REG_BSP;
- case 0x65: return UNW_REG_BSPSTORE;
- case 0x66: return UNW_REG_RNAT;
- case 0x67: return UNW_REG_UNAT;
- case 0x68: return UNW_REG_FPSR;
- case 0x69: return UNW_REG_PFS;
- case 0x6a: return UNW_REG_LC;
- default:
- break;
- }
- UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
- return UNW_REG_LC;
-}
-
-static void
-set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
-{
- reg->val = val;
- reg->where = where;
- if (reg->when == UNW_WHEN_NEVER)
- reg->when = when;
-}
-
-static void
-alloc_spill_area (unsigned long *offp, unsigned long regsize,
- struct unw_reg_info *lo, struct unw_reg_info *hi)
-{
- struct unw_reg_info *reg;
-
- for (reg = hi; reg >= lo; --reg) {
- if (reg->where == UNW_WHERE_SPILL_HOME) {
- reg->where = UNW_WHERE_PSPREL;
- *offp -= regsize;
- reg->val = *offp;
- }
- }
-}
-
-static inline void
-spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
-{
- struct unw_reg_info *reg;
-
- for (reg = *regp; reg <= lim; ++reg) {
- if (reg->where == UNW_WHERE_SPILL_HOME) {
- reg->when = t;
- *regp = reg + 1;
- return;
- }
- }
- UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
-}
-
-static inline void
-finish_prologue (struct unw_state_record *sr)
-{
- struct unw_reg_info *reg;
- unsigned long off;
- int i;
-
- /*
- * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
- * for Using Unwind Descriptors", rule 3):
- */
- for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
- reg = sr->curr.reg + unw.save_order[i];
- if (reg->where == UNW_WHERE_GR_SAVE) {
- reg->where = UNW_WHERE_GR;
- reg->val = sr->gr_save_loc++;
- }
- }
-
- /*
- * Next, compute when the fp, general, and branch registers get
- * saved. This must come before alloc_spill_area() because
- * we need to know which registers are spilled to their home
- * locations.
- */
- if (sr->imask) {
- unsigned char kind, mask = 0, *cp = sr->imask;
- int t;
- static const unsigned char limit[3] = {
- UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
- };
- struct unw_reg_info *(regs[3]);
-
- regs[0] = sr->curr.reg + UNW_REG_F2;
- regs[1] = sr->curr.reg + UNW_REG_R4;
- regs[2] = sr->curr.reg + UNW_REG_B1;
-
- for (t = 0; t < sr->region_len; ++t) {
- if ((t & 3) == 0)
- mask = *cp++;
- kind = (mask >> 2*(3-(t & 3))) & 3;
- if (kind > 0)
- spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
- sr->region_start + t);
- }
- }
- /*
- * Next, lay out the memory stack spill area:
- */
- if (sr->any_spills) {
- off = sr->spill_offset;
- alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
- alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
- alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
- }
-}
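
/*
 * Editor's note: illustrative sketch (not part of the original file) of
 * the imask encoding consumed by the loop in finish_prologue() above: one
 * 2-bit "kind" per instruction slot, packed four to a byte, most
 * significant pair first.  Kind 0 = no spill, 1 = next fp reg, 2 = next
 * general reg, 3 = next branch reg.  decode_imask_kind() is a
 * hypothetical helper name.
 */
static inline unsigned char
decode_imask_kind (const unsigned char *imask, int t)
{
	/* byte t/4 holds slots 4*(t/4) .. 4*(t/4)+3, high pair first */
	return (imask[t >> 2] >> 2*(3 - (t & 3))) & 3;
}
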
-
-/*
- * Region header descriptors.
- */
-
-static void
-desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
- struct unw_state_record *sr)
-{
- int i, region_start;
-
- if (!(sr->in_body || sr->first_region))
- finish_prologue(sr);
- sr->first_region = 0;
-
- /* check if we're done: */
- if (sr->when_target < sr->region_start + sr->region_len) {
- sr->done = 1;
- return;
- }
-
- region_start = sr->region_start + sr->region_len;
-
- for (i = 0; i < sr->epilogue_count; ++i)
- pop(sr);
- sr->epilogue_count = 0;
- sr->epilogue_start = UNW_WHEN_NEVER;
-
- sr->region_start = region_start;
- sr->region_len = rlen;
- sr->in_body = body;
-
- if (!body) {
- push(sr);
-
- for (i = 0; i < 4; ++i) {
- if (mask & 0x8)
- set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
- sr->region_start + sr->region_len - 1, grsave++);
- mask <<= 1;
- }
- sr->gr_save_loc = grsave;
- sr->any_spills = 0;
- sr->imask = NULL;
- sr->spill_offset = 0x10; /* default to psp+16 */
- }
-}
-
-/*
- * Prologue descriptors.
- */
-
-static inline void
-desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
-{
- if (abi == 3 && context == 'i') {
- sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
- UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
- }
- else
-		UNW_DPRINT(0, "unwind.%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
- __FUNCTION__, abi, context);
-}
-
-static inline void
-desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 5; ++i) {
- if (brmask & 1)
- set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
- sr->region_start + sr->region_len - 1, gr++);
- brmask >>= 1;
- }
-}
-
-static inline void
-desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 5; ++i) {
- if (brmask & 1) {
- set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
- sr->region_start + sr->region_len - 1, 0);
- sr->any_spills = 1;
- }
- brmask >>= 1;
- }
-}
-
-static inline void
-desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 4; ++i) {
- if ((grmask & 1) != 0) {
- set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
- sr->region_start + sr->region_len - 1, 0);
- sr->any_spills = 1;
- }
- grmask >>= 1;
- }
- for (i = 0; i < 20; ++i) {
- if ((frmask & 1) != 0) {
- int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
- set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
- sr->region_start + sr->region_len - 1, 0);
- sr->any_spills = 1;
- }
- frmask >>= 1;
- }
-}
-
-static inline void
-desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 4; ++i) {
- if ((frmask & 1) != 0) {
- set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
- sr->region_start + sr->region_len - 1, 0);
- sr->any_spills = 1;
- }
- frmask >>= 1;
- }
-}
-
-static inline void
-desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 4; ++i) {
- if ((grmask & 1) != 0)
- set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
- sr->region_start + sr->region_len - 1, gr++);
- grmask >>= 1;
- }
-}
-
-static inline void
-desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
-{
- int i;
-
- for (i = 0; i < 4; ++i) {
- if ((grmask & 1) != 0) {
- set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
- sr->region_start + sr->region_len - 1, 0);
- sr->any_spills = 1;
- }
- grmask >>= 1;
- }
-}
-
-static inline void
-desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
-{
- set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
- sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
-}
-
-static inline void
-desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
-{
- sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
-}
-
-static inline void
-desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
-{
- set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
-}
-
-static inline void
-desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
-{
- set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
- 0x10 - 4*pspoff);
-}
-
-static inline void
-desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
-{
- set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
- 4*spoff);
-}
-
-static inline void
-desc_rp_br (unsigned char dst, struct unw_state_record *sr)
-{
- sr->return_link_reg = dst;
-}
-
-static inline void
-desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
-{
- struct unw_reg_info *reg = sr->curr.reg + regnum;
-
- if (reg->where == UNW_WHERE_NONE)
- reg->where = UNW_WHERE_GR_SAVE;
- reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
-}
-
-static inline void
-desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
-{
- sr->spill_offset = 0x10 - 4*pspoff;
-}
-
-static inline unsigned char *
-desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
-{
- sr->imask = imaskp;
- return imaskp + (2*sr->region_len + 7)/8;
-}
-
-/*
- * Body descriptors.
- */
-static inline void
-desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
-{
- sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
- sr->epilogue_count = ecount + 1;
-}
-
-static inline void
-desc_copy_state (unw_word label, struct unw_state_record *sr)
-{
- struct unw_labeled_state *ls;
-
- for (ls = sr->labeled_states; ls; ls = ls->next) {
- if (ls->label == label) {
- free_state_stack(&sr->curr);
- memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
- sr->curr.next = dup_state_stack(ls->saved_state.next);
- return;
- }
- }
- printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
-}
-
-static inline void
-desc_label_state (unw_word label, struct unw_state_record *sr)
-{
- struct unw_labeled_state *ls;
-
- ls = alloc_labeled_state();
- if (!ls) {
- printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
- return;
- }
- ls->label = label;
- memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
- ls->saved_state.next = dup_state_stack(sr->curr.next);
-
- /* insert into list of labeled states: */
- ls->next = sr->labeled_states;
- sr->labeled_states = ls;
-}
-
-/*
- * General descriptors.
- */
-
-static inline int
-desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
-{
- if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
- return 0;
- if (qp > 0) {
- if ((sr->pr_val & (1UL << qp)) == 0)
- return 0;
- sr->pr_mask |= (1UL << qp);
- }
- return 1;
-}
-
-static inline void
-desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
-{
- struct unw_reg_info *r;
-
- if (!desc_is_active(qp, t, sr))
- return;
-
- r = sr->curr.reg + decode_abreg(abreg, 0);
- r->where = UNW_WHERE_NONE;
- r->when = UNW_WHEN_NEVER;
- r->val = 0;
-}
-
-static inline void
-desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
- unsigned char ytreg, struct unw_state_record *sr)
-{
- enum unw_where where = UNW_WHERE_GR;
- struct unw_reg_info *r;
-
- if (!desc_is_active(qp, t, sr))
- return;
-
- if (x)
- where = UNW_WHERE_BR;
- else if (ytreg & 0x80)
- where = UNW_WHERE_FR;
-
- r = sr->curr.reg + decode_abreg(abreg, 0);
- r->where = where;
- r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
- r->val = (ytreg & 0x7f);
-}
-
-static inline void
-desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
- struct unw_state_record *sr)
-{
- struct unw_reg_info *r;
-
- if (!desc_is_active(qp, t, sr))
- return;
-
- r = sr->curr.reg + decode_abreg(abreg, 1);
- r->where = UNW_WHERE_PSPREL;
- r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
- r->val = 0x10 - 4*pspoff;
-}
-
-static inline void
-desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
- struct unw_state_record *sr)
-{
- struct unw_reg_info *r;
-
- if (!desc_is_active(qp, t, sr))
- return;
-
- r = sr->curr.reg + decode_abreg(abreg, 1);
- r->where = UNW_WHERE_SPREL;
- r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
- r->val = 4*spoff;
-}
-
-#define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
- code);
-
-/*
- * region headers:
- */
-#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
-#define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
-/*
- * prologue descriptors:
- */
-#define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
-#define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
-#define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
-#define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
-#define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
-#define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
-#define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
-#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
-#define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
-#define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
-#define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
-#define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
-#define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
-#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
-#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
-#define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
-#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
-#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
-#define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
-#define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
-#define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
-/*
- * body descriptors:
- */
-#define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
-#define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
-#define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
-/*
- * general unwind descriptors:
- */
-#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
-#define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
-#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
-#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
-#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
-#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
-#define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
-#define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
-
-#include "unwind_decoder.c"
-
-
-/* Unwind scripts. */
-
-static inline unw_hash_index_t
-hash (unsigned long ip)
-{
-#	define hashmagic	0x9e3779b97f4a7c16UL	/* based on ((sqrt(5)-1)/2)*2^64 */
-
- return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
-#undef hashmagic
-}
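
/*
 * Editor's note: step-by-step restatement (not part of the original file)
 * of the multiplicative hash above.  IA-64 bundles are 16-byte aligned, so
 * ip >> 4 yields the bundle number; multiplying by ~2^64/phi (Fibonacci
 * hashing) scrambles consecutive bundle numbers across the full 64-bit
 * range, and the top UNW_LOG_HASH_SIZE bits pick the bucket.
 * example_bucket() is a hypothetical helper name.
 */
static inline unsigned long
example_bucket (unsigned long ip)
{
	unsigned long key = ip >> 4;			/* bundle number */
	unsigned long mix = key * 0x9e3779b97f4a7c16UL;	/* golden-ratio multiply */
	return mix >> (64 - UNW_LOG_HASH_SIZE);		/* top bits select the slot */
}
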
-
-static inline long
-cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
-{
- read_lock(&script->lock);
- if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
- /* keep the read lock... */
- return 1;
- read_unlock(&script->lock);
- return 0;
-}
-
-static inline struct unw_script *
-script_lookup (struct unw_frame_info *info)
-{
- struct unw_script *script = unw.cache + info->hint;
- unsigned short index;
- unsigned long ip, pr;
-
- if (UNW_DEBUG_ON(0))
- return NULL; /* Always regenerate scripts in debug mode */
-
- STAT(++unw.stat.cache.lookups);
-
- ip = info->ip;
- pr = info->pr;
-
- if (cache_match(script, ip, pr)) {
- STAT(++unw.stat.cache.hinted_hits);
- return script;
- }
-
- index = unw.hash[hash(ip)];
- if (index >= UNW_CACHE_SIZE)
- return NULL;
-
- script = unw.cache + index;
- while (1) {
- if (cache_match(script, ip, pr)) {
- /* update hint; no locking required as single-word writes are atomic */
- STAT(++unw.stat.cache.normal_hits);
- unw.cache[info->prev_script].hint = script - unw.cache;
- return script;
- }
- if (script->coll_chain >= UNW_HASH_SIZE)
- return NULL;
- script = unw.cache + script->coll_chain;
- STAT(++unw.stat.cache.collision_chain_traversals);
- }
-}
-
-/*
- * On returning, a write lock for the SCRIPT is still being held.
- */
-static inline struct unw_script *
-script_new (unsigned long ip)
-{
- struct unw_script *script, *prev, *tmp;
- unw_hash_index_t index;
- unsigned short head;
-
- STAT(++unw.stat.script.news);
-
- /*
- * Can't (easily) use cmpxchg() here because of ABA problem
- * that is intrinsic in cmpxchg()...
- */
- head = unw.lru_head;
- script = unw.cache + head;
- unw.lru_head = script->lru_chain;
-
- /*
- * We'd deadlock here if we interrupted a thread that is holding a read lock on
- * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
- * alternative would be to disable interrupts whenever we hold a read-lock, but
- * that seems silly.
- */
- if (!write_trylock(&script->lock))
- return NULL;
-
- /* re-insert script at the tail of the LRU chain: */
- unw.cache[unw.lru_tail].lru_chain = head;
- unw.lru_tail = head;
-
- /* remove the old script from the hash table (if it's there): */
- if (script->ip) {
- index = hash(script->ip);
- tmp = unw.cache + unw.hash[index];
- prev = NULL;
- while (1) {
- if (tmp == script) {
- if (prev)
- prev->coll_chain = tmp->coll_chain;
- else
- unw.hash[index] = tmp->coll_chain;
- break;
- } else
- prev = tmp;
- if (tmp->coll_chain >= UNW_CACHE_SIZE)
- /* old script wasn't in the hash-table */
- break;
- tmp = unw.cache + tmp->coll_chain;
- }
- }
-
- /* enter new script in the hash table */
- index = hash(ip);
- script->coll_chain = unw.hash[index];
- unw.hash[index] = script - unw.cache;
-
- script->ip = ip; /* set new IP while we're holding the locks */
-
- STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
-
- script->flags = 0;
- script->hint = 0;
- script->count = 0;
- return script;
-}
-
-static void
-script_finalize (struct unw_script *script, struct unw_state_record *sr)
-{
- script->pr_mask = sr->pr_mask;
- script->pr_val = sr->pr_val;
- /*
- * We could down-grade our write-lock on script->lock here but
- * the rwlock API doesn't offer atomic lock downgrading, so
- * we'll just keep the write-lock and release it later when
- * we're done using the script.
- */
-}
-
-static inline void
-script_emit (struct unw_script *script, struct unw_insn insn)
-{
- if (script->count >= UNW_MAX_SCRIPT_LEN) {
- UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
- __FUNCTION__, UNW_MAX_SCRIPT_LEN);
- return;
- }
- script->insn[script->count++] = insn;
-}
-
-static inline void
-emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
-{
- struct unw_reg_info *r = sr->curr.reg + i;
- enum unw_insn_opcode opc;
- struct unw_insn insn;
- unsigned long val = 0;
-
- switch (r->where) {
- case UNW_WHERE_GR:
- if (r->val >= 32) {
- /* register got spilled to a stacked register */
- opc = UNW_INSN_SETNAT_TYPE;
- val = UNW_NAT_REGSTK;
- } else
- /* register got spilled to a scratch register */
- opc = UNW_INSN_SETNAT_MEMSTK;
- break;
-
- case UNW_WHERE_FR:
- opc = UNW_INSN_SETNAT_TYPE;
- val = UNW_NAT_VAL;
- break;
-
- case UNW_WHERE_BR:
- opc = UNW_INSN_SETNAT_TYPE;
- val = UNW_NAT_NONE;
- break;
-
- case UNW_WHERE_PSPREL:
- case UNW_WHERE_SPREL:
- opc = UNW_INSN_SETNAT_MEMSTK;
- break;
-
- default:
- UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
- __FUNCTION__, r->where);
- return;
- }
- insn.opc = opc;
- insn.dst = unw.preg_index[i];
- insn.val = val;
- script_emit(script, insn);
-}
-
-static void
-compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
-{
- struct unw_reg_info *r = sr->curr.reg + i;
- enum unw_insn_opcode opc;
- unsigned long val, rval;
- struct unw_insn insn;
- long need_nat_info;
-
- if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
- return;
-
- opc = UNW_INSN_MOVE;
- val = rval = r->val;
- need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
-
- switch (r->where) {
- case UNW_WHERE_GR:
- if (rval >= 32) {
- opc = UNW_INSN_MOVE_STACKED;
- val = rval - 32;
- } else if (rval >= 4 && rval <= 7) {
- if (need_nat_info) {
- opc = UNW_INSN_MOVE2;
- need_nat_info = 0;
- }
- val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
- } else if (rval == 0) {
- opc = UNW_INSN_MOVE_CONST;
- val = 0;
- } else {
- /* register got spilled to a scratch register */
- opc = UNW_INSN_MOVE_SCRATCH;
- val = pt_regs_off(rval);
- }
- break;
-
- case UNW_WHERE_FR:
- if (rval <= 5)
- val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
- else if (rval >= 16 && rval <= 31)
- val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
- else {
- opc = UNW_INSN_MOVE_SCRATCH;
- if (rval <= 11)
- val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
- else
- UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
- __FUNCTION__, rval);
- }
- break;
-
- case UNW_WHERE_BR:
- if (rval >= 1 && rval <= 5)
- val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
- else {
- opc = UNW_INSN_MOVE_SCRATCH;
- if (rval == 0)
- val = offsetof(struct pt_regs, b0);
- else if (rval == 6)
- val = offsetof(struct pt_regs, b6);
- else
- val = offsetof(struct pt_regs, b7);
- }
- break;
-
- case UNW_WHERE_SPREL:
- opc = UNW_INSN_ADD_SP;
- break;
-
- case UNW_WHERE_PSPREL:
- opc = UNW_INSN_ADD_PSP;
- break;
-
- default:
-		UNW_DPRINT(0, "unwind.%s: register %u has unexpected `where' value of %u\n",
- __FUNCTION__, i, r->where);
- break;
- }
- insn.opc = opc;
- insn.dst = unw.preg_index[i];
- insn.val = val;
- script_emit(script, insn);
- if (need_nat_info)
- emit_nat_info(sr, i, script);
-
- if (i == UNW_REG_PSP) {
- /*
- * info->psp must contain the _value_ of the previous
-		 * sp, not its save location.  We get this by
- * dereferencing the value we just stored in
- * info->psp:
- */
- insn.opc = UNW_INSN_LOAD;
- insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
- script_emit(script, insn);
- }
-}
-
-static inline const struct unw_table_entry *
-lookup (struct unw_table *table, unsigned long rel_ip)
-{
- const struct unw_table_entry *e = NULL;
- unsigned long lo, hi, mid;
-
- /* do a binary search for right entry: */
- for (lo = 0, hi = table->length; lo < hi; ) {
- mid = (lo + hi) / 2;
- e = &table->array[mid];
- if (rel_ip < e->start_offset)
- hi = mid;
- else if (rel_ip >= e->end_offset)
- lo = mid + 1;
- else
- break;
- }
- if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
- return NULL;
- return e;
-}
-
-/*
- * Build an unwind script that unwinds from the state in INFO to the
- * entrypoint of the function that called it.
- */
-static inline struct unw_script *
-build_script (struct unw_frame_info *info)
-{
- const struct unw_table_entry *e = NULL;
- struct unw_script *script = NULL;
- struct unw_labeled_state *ls, *next;
- unsigned long ip = info->ip;
- struct unw_state_record sr;
- struct unw_table *table;
- struct unw_reg_info *r;
- struct unw_insn insn;
- u8 *dp, *desc_end;
- u64 hdr;
- int i;
- STAT(unsigned long start, parse_start;)
-
- STAT(++unw.stat.script.builds; start = ia64_get_itc());
-
- /* build state record */
- memset(&sr, 0, sizeof(sr));
- for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
- r->when = UNW_WHEN_NEVER;
- sr.pr_val = info->pr;
-
- UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
- script = script_new(ip);
- if (!script) {
- UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
- STAT(unw.stat.script.build_time += ia64_get_itc() - start);
- return NULL;
- }
- unw.cache[info->prev_script].hint = script - unw.cache;
-
-	/* search the kernel's and the modules' unwind tables for IP: */
-
- STAT(parse_start = ia64_get_itc());
-
- for (table = unw.tables; table; table = table->next) {
- if (ip >= table->start && ip < table->end) {
- e = lookup(table, ip - table->segment_base);
- break;
- }
- }
- if (!e) {
- /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
- UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
- __FUNCTION__, ip, unw.cache[info->prev_script].ip);
- sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
- sr.curr.reg[UNW_REG_RP].when = -1;
- sr.curr.reg[UNW_REG_RP].val = 0;
- compile_reg(&sr, UNW_REG_RP, script);
- script_finalize(script, &sr);
- STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
- STAT(unw.stat.script.build_time += ia64_get_itc() - start);
- return script;
- }
-
- sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
- + (ip & 0xfUL));
- hdr = *(u64 *) (table->segment_base + e->info_offset);
- dp = (u8 *) (table->segment_base + e->info_offset + 8);
- desc_end = dp + 8*UNW_LENGTH(hdr);
-
- while (!sr.done && dp < desc_end)
- dp = unw_decode(dp, sr.in_body, &sr);
-
- if (sr.when_target > sr.epilogue_start) {
- /*
- * sp has been restored and all values on the memory stack below
- * psp also have been restored.
- */
- sr.curr.reg[UNW_REG_PSP].val = 0;
- sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
- sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
- for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
- if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
- || r->where == UNW_WHERE_SPREL)
- {
- r->val = 0;
- r->where = UNW_WHERE_NONE;
- r->when = UNW_WHEN_NEVER;
- }
- }
-
- script->flags = sr.flags;
-
- /*
-	 * If RP didn't get saved, generate entry for the return link
- * register.
- */
- if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
- sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
- sr.curr.reg[UNW_REG_RP].when = -1;
- sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
- UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
- __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
- sr.curr.reg[UNW_REG_RP].val);
- }
-
-#ifdef UNW_DEBUG
- UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
- __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
- for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
- if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
- UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
- switch (r->where) {
- case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
- case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
- case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
- case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
- case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
- case UNW_WHERE_NONE:
- UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
- break;
-
- default:
- UNW_DPRINT(1, "BADWHERE(%d)", r->where);
- break;
- }
- UNW_DPRINT(1, "\t\t%d\n", r->when);
- }
- }
-#endif
-
- STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
-
- /* translate state record into unwinder instructions: */
-
- /*
- * First, set psp if we're dealing with a fixed-size frame;
- * subsequent instructions may depend on this value.
- */
- if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
- && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
- && sr.curr.reg[UNW_REG_PSP].val != 0) {
- /* new psp is sp plus frame size */
- insn.opc = UNW_INSN_ADD;
- insn.dst = offsetof(struct unw_frame_info, psp)/8;
- insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
- script_emit(script, insn);
- }
-
- /* determine where the primary UNaT is: */
- if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
- i = UNW_REG_PRI_UNAT_MEM;
- else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
- i = UNW_REG_PRI_UNAT_GR;
- else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
- i = UNW_REG_PRI_UNAT_MEM;
- else
- i = UNW_REG_PRI_UNAT_GR;
-
- compile_reg(&sr, i, script);
-
- for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
- compile_reg(&sr, i, script);
-
- /* free labeled register states & stack: */
-
- STAT(parse_start = ia64_get_itc());
- for (ls = sr.labeled_states; ls; ls = next) {
- next = ls->next;
- free_state_stack(&ls->saved_state);
- free_labeled_state(ls);
- }
- free_state_stack(&sr.curr);
- STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
-
- script_finalize(script, &sr);
- STAT(unw.stat.script.build_time += ia64_get_itc() - start);
- return script;
-}
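
/*
 * Editor's note: worked example (not part of the original file) for the
 * when_target computation in build_script() above.  IA-64 bundles are 16
 * bytes and hold three instruction slots, and the low bits of an unwinder
 * "ip" carry the slot number (0-2).  For an ip two bundles plus two slots
 * past the function start:
 *
 *	3*(0x20/16) + 2  =  3*2 + 2  =  8
 *
 * i.e. eight slots into the region, which is the same unit the
 * region-length and "when" fields of the unwind descriptors use.
 */
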
-
-/*
- * Apply the unwinding actions in SCRIPT and update STATE to reflect the
- * register state that existed upon entry to the function that the
- * script represents.
- */
-static inline void
-run_script (struct unw_script *script, struct unw_frame_info *state)
-{
- struct unw_insn *ip, *limit, next_insn;
- unsigned long opc, dst, val, off;
- unsigned long *s = (unsigned long *) state;
- STAT(unsigned long start;)
-
- STAT(++unw.stat.script.runs; start = ia64_get_itc());
- state->flags = script->flags;
- ip = script->insn;
- limit = script->insn + script->count;
- next_insn = *ip;
-
- while (ip++ < limit) {
- opc = next_insn.opc;
- dst = next_insn.dst;
- val = next_insn.val;
- next_insn = *ip;
-
- redo:
- switch (opc) {
- case UNW_INSN_ADD:
- s[dst] += val;
- break;
-
- case UNW_INSN_MOVE2:
- if (!s[val])
- goto lazy_init;
- s[dst+1] = s[val+1];
- s[dst] = s[val];
- break;
-
- case UNW_INSN_MOVE:
- if (!s[val])
- goto lazy_init;
- s[dst] = s[val];
- break;
-
- case UNW_INSN_MOVE_SCRATCH:
- if (state->pt) {
- s[dst] = (unsigned long) get_scratch_regs(state) + val;
- } else {
- s[dst] = 0;
- UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
- __FUNCTION__, dst, val);
- }
- break;
-
- case UNW_INSN_MOVE_CONST:
- if (val == 0)
- s[dst] = (unsigned long) &unw.r0;
- else {
- s[dst] = 0;
- UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
- __FUNCTION__, val);
- }
- break;
-
-
- case UNW_INSN_MOVE_STACKED:
- s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
- val);
- break;
-
- case UNW_INSN_ADD_PSP:
- s[dst] = state->psp + val;
- break;
-
- case UNW_INSN_ADD_SP:
- s[dst] = state->sp + val;
- break;
-
- case UNW_INSN_SETNAT_MEMSTK:
- if (!state->pri_unat_loc)
- state->pri_unat_loc = &state->sw->caller_unat;
- /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
- s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
- break;
-
- case UNW_INSN_SETNAT_TYPE:
- s[dst+1] = val;
- break;
-
- case UNW_INSN_LOAD:
-#ifdef UNW_DEBUG
- if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
-#ifndef XEN
- || s[val] < TASK_SIZE
-#endif
- )
- {
- UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
- __FUNCTION__, s[val]);
- break;
- }
-#endif
- s[dst] = *(unsigned long *) s[val];
- break;
- }
- }
- STAT(unw.stat.script.run_time += ia64_get_itc() - start);
- return;
-
- lazy_init:
- off = unw.sw_off[val];
- s[val] = (unsigned long) state->sw + off;
- if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
- /*
- * We're initializing a general register: init NaT info, too. Note that
- * the offset is a multiple of 8 which gives us the 3 bits needed for
- * the type field.
- */
- s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
- goto redo;
-}
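
/*
 * Editor's note: illustrative sketch (not part of the original file) of a
 * minimal compiled script.  For the "no unwind info" fallback taken in
 * build_script() above, compile_reg() emits a single instruction that
 * points the rp save location at pt_regs->b0:
 *
 *	insn.opc = UNW_INSN_MOVE_SCRATCH;
 *	insn.dst = unw.preg_index[UNW_REG_RP];
 *	insn.val = offsetof(struct pt_regs, b0);
 *
 * which run_script() above turns into state->rp_loc pointing into the
 * scratch pt_regs.
 */
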
-
-#ifdef XEN
-static inline int
-is_hypervisor_virt(unsigned long addr)
-{
- return IS_VMM_ADDRESS(addr) &&
- (HYPERVISOR_VIRT_START <= addr) &&
- (addr < HYPERVISOR_VIRT_END);
-}
-#endif
-
-static int
-find_save_locs (struct unw_frame_info *info)
-{
- int have_write_lock = 0;
- struct unw_script *scr;
- unsigned long flags = 0;
-
- if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf))
-#ifndef XEN
- || info->ip < TASK_SIZE
-#else
- || !is_hypervisor_virt(info->ip)
-#endif
- ) {
- /* don't let obviously bad addresses pollute the cache */
- /* FIXME: should really be level 0 but it occurs too often. KAO */
- UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
- info->rp_loc = NULL;
- return -1;
- }
-
- scr = script_lookup(info);
- if (!scr) {
- spin_lock_irqsave(&unw.lock, flags);
- scr = build_script(info);
- if (!scr) {
- spin_unlock_irqrestore(&unw.lock, flags);
- UNW_DPRINT(0,
- "unwind.%s: failed to locate/build unwind script for ip %lx\n",
- __FUNCTION__, info->ip);
- return -1;
- }
- have_write_lock = 1;
- }
- info->hint = scr->hint;
- info->prev_script = scr - unw.cache;
-
- run_script(scr, info);
-
- if (have_write_lock) {
- write_unlock(&scr->lock);
- spin_unlock_irqrestore(&unw.lock, flags);
- } else
- read_unlock(&scr->lock);
- return 0;
-}
-
-int
-unw_unwind (struct unw_frame_info *info)
-{
- unsigned long prev_ip, prev_sp, prev_bsp;
- unsigned long ip, pr, num_regs;
- STAT(unsigned long start, flags;)
- int retval;
-
- STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
-
- prev_ip = info->ip;
- prev_sp = info->sp;
- prev_bsp = info->bsp;
-
- /* restore the ip */
- if (!info->rp_loc) {
- /* FIXME: should really be level 0 but it occurs too often. KAO */
- UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
- __FUNCTION__, info->ip);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
- ip = info->ip = *info->rp_loc;
-#ifndef XEN
- if (ip < GATE_ADDR) {
-#else
- if (!is_hypervisor_virt(info->ip)) {
-#endif
- UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
-
- /* restore the cfm: */
- if (!info->pfs_loc) {
- UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
- info->cfm_loc = info->pfs_loc;
-
- /* restore the bsp: */
- pr = info->pr;
- num_regs = 0;
- if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
- info->pt = info->sp + 16;
- if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
- num_regs = *info->cfm_loc & 0x7f; /* size of frame */
- info->pfs_loc =
- (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
- UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
- } else
- num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
- info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
- if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
- UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
- __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
-
- /* restore the sp: */
- info->sp = info->psp;
- if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
- UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
- __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
-
- if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
- UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
- __FUNCTION__, ip);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return -1;
- }
-
- /* as we unwind, the saved ar.unat becomes the primary unat: */
- info->pri_unat_loc = info->unat_loc;
-
- /* finally, restore the predicates: */
- unw_get_pr(info, &info->pr);
-
- retval = find_save_locs(info);
- STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
- return retval;
-}
-EXPORT_SYMBOL(unw_unwind);
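
/*
 * Editor's note: usage sketch (not part of the original file) of the
 * typical backtrace loop built on this API: initialize the frame info for
 * a blocked task, then step outward until unw_unwind() fails.
 * example_backtrace() is a hypothetical helper name.
 */
static void
example_backtrace (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	do {
		unw_get_ip(&info, &ip);		/* ip of the current frame */
		if (ip == 0)
			break;
		printk("  [<%016lx>]\n", ip);
	} while (unw_unwind(&info) >= 0);
}
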
-
-int
-unw_unwind_to_user (struct unw_frame_info *info)
-{
- unsigned long ip, sp, pr = 0;
-
- while (unw_unwind(info) >= 0) {
- unw_get_sp(info, &sp);
- if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
- < IA64_PT_REGS_SIZE) {
- UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
- __FUNCTION__);
- break;
- }
-#ifndef XEN
- if (unw_is_intr_frame(info) &&
- (pr & (1UL << PRED_USER_STACK)))
- return 0;
-#else
- if (unw_is_intr_frame(info) &&
- !is_hvm_vcpu(info->task) &&
- (pr & (1UL << PRED_USER_STACK)))
- return 0;
- /*
-		 * vmx fault handlers don't update vcpu->on_stack and leave the
-		 * (pr & (1UL << PRED_USER_STACK)) condition untouched, so we
-		 * need another way to stop unwinding.
- */
- if (unw_is_intr_frame(info) &&
- is_hvm_vcpu(info->task) &&
- info->pr_loc == &vcpu_regs(info->task)->pr)
- return 0;
-#endif
- if (unw_get_pr (info, &pr) < 0) {
- unw_get_rp(info, &ip);
- UNW_DPRINT(0, "unwind.%s: failed to read "
- "predicate register (ip=0x%lx)\n",
- __FUNCTION__, ip);
- return -1;
- }
- }
- unw_get_ip(info, &ip);
- UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
- __FUNCTION__, ip);
- return -1;
-}
-EXPORT_SYMBOL(unw_unwind_to_user);
-
-static void
-init_frame_info (struct unw_frame_info *info, struct task_struct *t,
- struct switch_stack *sw, unsigned long stktop)
-{
- unsigned long rbslimit, rbstop, stklimit;
- STAT(unsigned long start, flags;)
-
- STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
-
- /*
- * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
- * don't want to do that because it would be slow as each preserved register would
- * have to be processed. Instead, what we do here is zero out the frame info and
- * start the unwind process at the function that created the switch_stack frame.
- * When a preserved value in switch_stack needs to be accessed, run_script() will
- * initialize the appropriate pointer on demand.
- */
- memset(info, 0, sizeof(*info));
-
- rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
- rbstop = sw->ar_bspstore;
- if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
- rbstop = rbslimit;
-
- stklimit = (unsigned long) t + IA64_STK_OFFSET;
- if (stktop <= rbstop)
- stktop = rbstop;
-
- info->regstk.limit = rbslimit;
- info->regstk.top = rbstop;
- info->memstk.limit = stklimit;
- info->memstk.top = stktop;
- info->task = t;
- info->sw = sw;
- info->sp = info->psp = stktop;
- info->pr = sw->pr;
- UNW_DPRINT(3, "unwind.%s:\n"
- " task 0x%lx\n"
- " rbs = [0x%lx-0x%lx)\n"
- " stk = [0x%lx-0x%lx)\n"
- " pr 0x%lx\n"
- " sw 0x%lx\n"
- " sp 0x%lx\n",
- __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
- info->pr, (unsigned long) info->sw, info->sp);
- STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
-}
-
-void
-unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
- struct pt_regs *pt, struct switch_stack *sw)
-{
- unsigned long sof;
-
- init_frame_info(info, t, sw, pt->r12);
- info->cfm_loc = &pt->cr_ifs;
- info->unat_loc = &pt->ar_unat;
- info->pfs_loc = &pt->ar_pfs;
- sof = *info->cfm_loc & 0x7f;
- info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
- info->ip = pt->cr_iip + ia64_psr(pt)->ri;
- info->pt = (unsigned long) pt;
- UNW_DPRINT(3, "unwind.%s:\n"
- " bsp 0x%lx\n"
- " sof 0x%lx\n"
- " ip 0x%lx\n",
- __FUNCTION__, info->bsp, sof, info->ip);
- find_save_locs(info);
-}
-
-void
-unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
-{
- unsigned long sol;
-
- init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
- info->cfm_loc = &sw->ar_pfs;
- sol = (*info->cfm_loc >> 7) & 0x7f;
- info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
- info->ip = sw->b0;
- UNW_DPRINT(3, "unwind.%s:\n"
- " bsp 0x%lx\n"
- " sol 0x%lx\n"
- " ip 0x%lx\n",
- __FUNCTION__, info->bsp, sol, info->ip);
- find_save_locs(info);
-}
-
-EXPORT_SYMBOL(unw_init_frame_info);
-
-void
-unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
-{
-#ifdef XEN
- struct switch_stack *sw = (struct switch_stack *) (t->arch._thread.ksp + 16);
-#else
- struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
-#endif
-
- UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
- unw_init_frame_info(info, t, sw);
-}
-EXPORT_SYMBOL(unw_init_from_blocked_task);
-
-static void
-init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
- unsigned long gp, const void *table_start, const void *table_end)
-{
- const struct unw_table_entry *start = table_start, *end = table_end;
-
- table->name = name;
- table->segment_base = segment_base;
- table->gp = gp;
- table->start = segment_base + start[0].start_offset;
- table->end = segment_base + end[-1].end_offset;
- table->array = start;
- table->length = end - start;
-}
-
-#ifndef XEN
-void *
-unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
- const void *table_start, const void *table_end)
-{
- const struct unw_table_entry *start = table_start, *end = table_end;
- struct unw_table *table;
- unsigned long flags;
-
- if (end - start <= 0) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
- __FUNCTION__);
- return NULL;
- }
-
- table = kmalloc(sizeof(*table), GFP_USER);
- if (!table)
- return NULL;
-
- init_unwind_table(table, name, segment_base, gp, table_start, table_end);
-
- spin_lock_irqsave(&unw.lock, flags);
- {
- /* keep kernel unwind table at the front (it's searched most commonly): */
- table->next = unw.tables->next;
- unw.tables->next = table;
- }
- spin_unlock_irqrestore(&unw.lock, flags);
-
- return table;
-}
-
-void
-unw_remove_unwind_table (void *handle)
-{
- struct unw_table *table, *prev;
- struct unw_script *tmp;
- unsigned long flags;
- long index;
-
- if (!handle) {
- UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
- __FUNCTION__);
- return;
- }
-
- table = handle;
- if (table == &unw.kernel_table) {
- UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
- "no-can-do!\n", __FUNCTION__);
- return;
- }
-
- spin_lock_irqsave(&unw.lock, flags);
- {
- /* first, delete the table: */
-
- for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
- if (prev->next == table)
- break;
- if (!prev) {
- UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
- __FUNCTION__, (void *) table);
- spin_unlock_irqrestore(&unw.lock, flags);
- return;
- }
- prev->next = table->next;
- }
- spin_unlock_irqrestore(&unw.lock, flags);
-
- /* next, remove hash table entries for this table */
-
-	for (index = 0; index < UNW_HASH_SIZE; ++index) {
- tmp = unw.cache + unw.hash[index];
- if (unw.hash[index] >= UNW_CACHE_SIZE
- || tmp->ip < table->start || tmp->ip >= table->end)
- continue;
-
- write_lock(&tmp->lock);
- {
- if (tmp->ip >= table->start && tmp->ip < table->end) {
- unw.hash[index] = tmp->coll_chain;
- tmp->ip = 0;
- }
- }
- write_unlock(&tmp->lock);
- }
-
- kfree(table);
-}
-
-static int __init
-create_gate_table (void)
-{
- const struct unw_table_entry *entry, *start, *end;
- unsigned long *lp, segbase = GATE_ADDR;
- size_t info_size, size;
- char *info;
- Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
- int i;
-
- for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
- if (phdr->p_type == PT_IA_64_UNWIND) {
- punw = phdr;
- break;
- }
-
- if (!punw) {
- printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
- return 0;
- }
-
- start = (const struct unw_table_entry *) punw->p_vaddr;
- end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
- size = 0;
-
- unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
-
- for (entry = start; entry < end; ++entry)
- size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
- size += 8; /* reserve space for "end of table" marker */
-
- unw.gate_table = kmalloc(size, GFP_KERNEL);
- if (!unw.gate_table) {
- unw.gate_table_size = 0;
- printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
- return 0;
- }
- unw.gate_table_size = size;
-
- lp = unw.gate_table;
- info = (char *) unw.gate_table + size;
-
- for (entry = start; entry < end; ++entry, lp += 3) {
- info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
- info -= info_size;
- memcpy(info, (char *) segbase + entry->info_offset, info_size);
-
- lp[0] = segbase + entry->start_offset; /* start */
- lp[1] = segbase + entry->end_offset; /* end */
- lp[2] = info - (char *) unw.gate_table; /* info */
- }
- *lp = 0; /* end-of-table marker */
- return 0;
-}
-
-__initcall(create_gate_table);
-#endif // !XEN
-
-void __init
-unw_init (void)
-{
- extern char __gp[];
- extern void unw_hash_index_t_is_too_narrow (void);
- long i, off;
-
- if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
- unw_hash_index_t_is_too_narrow();
-
- unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
- unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
- unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
- unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
- unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
- unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
- unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
- unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
- for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
- unw.sw_off[unw.preg_index[i]] = off;
- for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
- unw.sw_off[unw.preg_index[i]] = off;
- for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
- unw.sw_off[unw.preg_index[i]] = off;
- for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
- unw.sw_off[unw.preg_index[i]] = off;
-
- for (i = 0; i < UNW_CACHE_SIZE; ++i) {
- if (i > 0)
- unw.cache[i].lru_chain = (i - 1);
- unw.cache[i].coll_chain = -1;
- rwlock_init(&unw.cache[i].lock);
- }
- unw.lru_head = UNW_CACHE_SIZE - 1;
- unw.lru_tail = 0;
-
- init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
- __start_unwind, __end_unwind);
-}
-
-#ifndef XEN
-/*
- * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
- *
- * This system call has been deprecated. The new and improved way to get
- * at the kernel's unwind info is via the gate DSO. The address of the
- * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
- *
- * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
- *
- * This system call copies the unwind data into the buffer pointed to by BUF and returns
- * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
- * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
- * unwind data.
- *
- * The first portion of the unwind data contains an unwind table and the rest contains the
- * associated unwind info (in no particular order). The unwind table consists of a table
- * of entries of the form:
- *
- * u64 start; (64-bit address of start of function)
- *	u64 end;	(64-bit address of end of function)
- * u64 info; (BUF-relative offset to unwind info)
- *
- * The end of the unwind table is indicated by an entry with a START address of zero.
- *
- * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
- * on the format of the unwind info.
- *
- * ERRORS
- * EFAULT BUF points outside your accessible address space.
- */
-asmlinkage long
-sys_getunwind (void __user *buf, size_t buf_size)
-{
- if (buf && buf_size >= unw.gate_table_size)
- if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
- return -EFAULT;
- return unw.gate_table_size;
-}
-#endif
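
/*
 * Editor's note: hypothetical user-level sketch (not part of the original
 * file) of walking the buffer filled in by the deprecated sys_getunwind()
 * above: {start, end, info-offset} triples terminated by a zero START,
 * followed by the unwind-info blobs the offsets point into.
 */
#include <stdint.h>
#include <stdio.h>

struct gate_unw_entry { uint64_t start, end, info; };

static void walk_unwind_buf (const void *buf)
{
	const struct gate_unw_entry *e = buf;

	/* the table ends at an entry whose start address is zero */
	for (; e->start != 0; ++e)
		printf("func [0x%llx-0x%llx) info at buf+0x%llx\n",
		       (unsigned long long) e->start,
		       (unsigned long long) e->end,
		       (unsigned long long) e->info);
}
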
diff --git a/xen/arch/ia64/linux-xen/unwind_decoder.c b/xen/arch/ia64/linux-xen/unwind_decoder.c
deleted file mode 100644
index 50ac2d82f9..0000000000
--- a/xen/arch/ia64/linux-xen/unwind_decoder.c
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- * Copyright (C) 2000 Hewlett-Packard Co
- * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Generic IA-64 unwind info decoder.
- *
- * This file is used both by the Linux kernel and objdump. Please keep
- * the two copies of this file in sync.
- *
- * You need to customize the decoder by defining the following
- * macros/constants before including this file:
- *
- * Types:
- * unw_word Unsigned integer type with at least 64 bits
- *
- * Register names:
- * UNW_REG_BSP
- * UNW_REG_BSPSTORE
- * UNW_REG_FPSR
- * UNW_REG_LC
- * UNW_REG_PFS
- * UNW_REG_PR
- * UNW_REG_RNAT
- * UNW_REG_PSP
- * UNW_REG_RP
- * UNW_REG_UNAT
- *
- * Decoder action macros:
- * UNW_DEC_BAD_CODE(code)
- * UNW_DEC_ABI(fmt,abi,context,arg)
- * UNW_DEC_BR_GR(fmt,brmask,gr,arg)
- * UNW_DEC_BR_MEM(fmt,brmask,arg)
- * UNW_DEC_COPY_STATE(fmt,label,arg)
- * UNW_DEC_EPILOGUE(fmt,t,ecount,arg)
- * UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg)
- * UNW_DEC_FR_MEM(fmt,frmask,arg)
- * UNW_DEC_GR_GR(fmt,grmask,gr,arg)
- * UNW_DEC_GR_MEM(fmt,grmask,arg)
- * UNW_DEC_LABEL_STATE(fmt,label,arg)
- * UNW_DEC_MEM_STACK_F(fmt,t,size,arg)
- * UNW_DEC_MEM_STACK_V(fmt,t,arg)
- * UNW_DEC_PRIUNAT_GR(fmt,r,arg)
- * UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)
- * UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)
- * UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg)
- * UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg)
- * UNW_DEC_PROLOGUE(fmt,body,rlen,arg)
- * UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg)
- * UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg)
- * UNW_DEC_REG_REG(fmt,src,dst,arg)
- * UNW_DEC_REG_SPREL(fmt,reg,spoff,arg)
- * UNW_DEC_REG_WHEN(fmt,reg,t,arg)
- * UNW_DEC_RESTORE(fmt,t,abreg,arg)
- * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg)
- * UNW_DEC_SPILL_BASE(fmt,pspoff,arg)
- * UNW_DEC_SPILL_MASK(fmt,imaskp,arg)
- * UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg)
- * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg)
- * UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg)
- * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg)
- * UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg)
- * UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg)
- */
-
-static unw_word
-unw_decode_uleb128 (unsigned char **dpp)
-{
- unsigned shift = 0;
- unw_word byte, result = 0;
- unsigned char *bp = *dpp;
-
- while (1)
- {
- byte = *bp++;
- result |= (byte & 0x7f) << shift;
- if ((byte & 0x80) == 0)
- break;
- shift += 7;
- }
- *dpp = bp;
- return result;
-}
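
/*
 * Editor's note: worked example (not part of the original file) of the
 * ULEB128 encoding decoded above: each byte carries seven payload bits,
 * least-significant group first, and the top bit marks continuation.
 * The classic DWARF test value 624485 encodes as e5 8e 26:
 *	0x65 | (0x0e << 7) | (0x26 << 14) = 624485
 */
static void
example_uleb128 (void)
{
	unsigned char buf[] = { 0xe5, 0x8e, 0x26 };
	unsigned char *p = buf;

	unw_word v = unw_decode_uleb128(&p);	/* v == 624485, p == buf + 3 */
	(void) v;
}
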
-
-static unsigned char *
-unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char byte1, abreg;
- unw_word t, off;
-
- byte1 = *dp++;
- t = unw_decode_uleb128 (&dp);
- off = unw_decode_uleb128 (&dp);
- abreg = (byte1 & 0x7f);
- if (byte1 & 0x80)
- UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg);
- else
- UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char byte1, byte2, abreg, x, ytreg;
- unw_word t;
-
- byte1 = *dp++; byte2 = *dp++;
- t = unw_decode_uleb128 (&dp);
- abreg = (byte1 & 0x7f);
- ytreg = byte2;
- x = (byte1 >> 7) & 1;
- if ((byte1 & 0x80) == 0 && ytreg == 0)
- UNW_DEC_RESTORE(X2, t, abreg, arg);
- else
- UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char byte1, byte2, abreg, qp;
- unw_word t, off;
-
- byte1 = *dp++; byte2 = *dp++;
- t = unw_decode_uleb128 (&dp);
- off = unw_decode_uleb128 (&dp);
-
- qp = (byte1 & 0x3f);
- abreg = (byte2 & 0x7f);
-
- if (byte1 & 0x80)
- UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg);
- else
- UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg;
- unw_word t;
-
- byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
- t = unw_decode_uleb128 (&dp);
-
- qp = (byte1 & 0x3f);
- abreg = (byte2 & 0x7f);
- x = (byte2 >> 7) & 1;
- ytreg = byte3;
-
- if ((byte2 & 0x80) == 0 && byte3 == 0)
- UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg);
- else
- UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg)
-{
- int body = (code & 0x20) != 0;
- unw_word rlen;
-
- rlen = (code & 0x1f);
- UNW_DEC_PROLOGUE(R1, body, rlen, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char byte1, mask, grsave;
- unw_word rlen;
-
- byte1 = *dp++;
-
- mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
- grsave = (byte1 & 0x7f);
- rlen = unw_decode_uleb128 (&dp);
- UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg)
-{
- unw_word rlen;
-
- rlen = unw_decode_uleb128 (&dp);
- UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char brmask = (code & 0x1f);
-
- UNW_DEC_BR_MEM(P1, brmask, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg)
-{
- if ((code & 0x10) == 0)
- {
- unsigned char byte1 = *dp++;
-
- UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1),
- (byte1 & 0x7f), arg);
- }
- else if ((code & 0x08) == 0)
- {
- unsigned char byte1 = *dp++, r, dst;
-
- r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
- dst = (byte1 & 0x7f);
- switch (r)
- {
- case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break;
- case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break;
- case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break;
- case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break;
- case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break;
- case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break;
- case 6: UNW_DEC_RP_BR(P3, dst, arg); break;
- case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break;
- case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break;
- case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break;
- case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break;
- case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break;
- default: UNW_DEC_BAD_CODE(r); break;
- }
- }
- else if ((code & 0x7) == 0)
- UNW_DEC_SPILL_MASK(P4, dp, arg);
- else if ((code & 0x7) == 1)
- {
- unw_word grmask, frmask, byte1, byte2, byte3;
-
- byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
- grmask = ((byte1 >> 4) & 0xf);
- frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3;
- UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg);
- }
- else
- UNW_DEC_BAD_CODE(code);
- return dp;
-}
-
-static unsigned char *
-unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg)
-{
- int gregs = (code & 0x10) != 0;
- unsigned char mask = (code & 0x0f);
-
- if (gregs)
- UNW_DEC_GR_MEM(P6, mask, arg);
- else
- UNW_DEC_FR_MEM(P6, mask, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg)
-{
- unsigned char r, byte1, byte2;
- unw_word t, size;
-
- if ((code & 0x10) == 0)
- {
- r = (code & 0xf);
- t = unw_decode_uleb128 (&dp);
- switch (r)
- {
- case 0:
- size = unw_decode_uleb128 (&dp);
- UNW_DEC_MEM_STACK_F(P7, t, size, arg);
- break;
-
- case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break;
- case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break;
- case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break;
- case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break;
- case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break;
- case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break;
- case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break;
- case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break;
- case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break;
- case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break;
- case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break;
- case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break;
- case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break;
- case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break;
- case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break;
- default: UNW_DEC_BAD_CODE(r); break;
- }
- }
- else
- {
- switch (code & 0xf)
- {
- case 0x0: /* p8 */
- {
- r = *dp++;
- t = unw_decode_uleb128 (&dp);
- switch (r)
- {
- case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break;
- case 2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break;
- case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break;
- case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break;
- case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break;
- case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break;
- case 7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break;
- case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break;
- case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break;
- case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break;
- case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
- case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
- case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break;
- case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break;
- case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break;
- case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break;
- case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break;
- case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break;
- case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break;
- default: UNW_DEC_BAD_CODE(r); break;
- }
- }
- break;
-
- case 0x1:
- byte1 = *dp++; byte2 = *dp++;
- UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg);
- break;
-
- case 0xf: /* p10 */
- byte1 = *dp++; byte2 = *dp++;
- UNW_DEC_ABI(P10, byte1, byte2, arg);
- break;
-
- case 0x9:
- return unw_decode_x1 (dp, code, arg);
-
- case 0xa:
- return unw_decode_x2 (dp, code, arg);
-
- case 0xb:
- return unw_decode_x3 (dp, code, arg);
-
- case 0xc:
- return unw_decode_x4 (dp, code, arg);
-
- default:
- UNW_DEC_BAD_CODE(code);
- break;
- }
- }
- return dp;
-}
-
-static unsigned char *
-unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg)
-{
- unw_word label = (code & 0x1f);
-
- if ((code & 0x20) != 0)
- UNW_DEC_COPY_STATE(B1, label, arg);
- else
- UNW_DEC_LABEL_STATE(B1, label, arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg)
-{
- unw_word t;
-
- t = unw_decode_uleb128 (&dp);
- UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg);
- return dp;
-}
-
-static unsigned char *
-unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg)
-{
- unw_word t, ecount, label;
-
- if ((code & 0x10) == 0)
- {
- t = unw_decode_uleb128 (&dp);
- ecount = unw_decode_uleb128 (&dp);
- UNW_DEC_EPILOGUE(B3, t, ecount, arg);
- }
- else if ((code & 0x07) == 0)
- {
- label = unw_decode_uleb128 (&dp);
- if ((code & 0x08) != 0)
- UNW_DEC_COPY_STATE(B4, label, arg);
- else
- UNW_DEC_LABEL_STATE(B4, label, arg);
- }
- else
- switch (code & 0x7)
- {
- case 1: return unw_decode_x1 (dp, code, arg);
- case 2: return unw_decode_x2 (dp, code, arg);
- case 3: return unw_decode_x3 (dp, code, arg);
- case 4: return unw_decode_x4 (dp, code, arg);
- default: UNW_DEC_BAD_CODE(code); break;
- }
- return dp;
-}
-
-typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *);
-
-static unw_decoder unw_decode_table[2][8] =
-{
- /* prologue table: */
- {
- unw_decode_r1, /* 0 */
- unw_decode_r1,
- unw_decode_r2,
- unw_decode_r3,
- unw_decode_p1, /* 4 */
- unw_decode_p2_p5,
- unw_decode_p6,
- unw_decode_p7_p10
- },
- {
- unw_decode_r1, /* 0 */
- unw_decode_r1,
- unw_decode_r2,
- unw_decode_r3,
- unw_decode_b1, /* 4 */
- unw_decode_b1,
- unw_decode_b2,
- unw_decode_b3_x4
- }
-};
-
-/*
- * Decode one descriptor and return address of next descriptor.
- */
-static inline unsigned char *
-unw_decode (unsigned char *dp, int inside_body, void *arg)
-{
- unw_decoder decoder;
- unsigned char code;
-
- code = *dp++;
- decoder = unw_decode_table[inside_body][code >> 5];
- dp = (*decoder) (dp, code, arg);
- return dp;
-}
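
The decoder above is fully table-driven: the top three bits of each descriptor's first byte, combined with the prologue/body flag, select one of eight format handlers, and all variable-length operands are ULEB128-encoded. For readers unfamiliar with ULEB128, here is a minimal, self-contained C sketch of that decode step; the test value is the classic DWARF example and none of the names below are Xen API:

    #include <stdint.h>
    #include <stdio.h>

    /* ULEB128: 7 payload bits per byte, low-order group first; the high
     * bit is set on every byte except the last. */
    static uint64_t uleb128(const uint8_t **pp)
    {
            uint64_t v = 0;
            unsigned shift = 0;
            uint8_t b;

            do {
                    b = *(*pp)++;
                    v |= (uint64_t)(b & 0x7f) << shift;
                    shift += 7;
            } while (b & 0x80);
            return v;
    }

    int main(void)
    {
            const uint8_t buf[] = { 0xe5, 0x8e, 0x26 };   /* encodes 624485 */
            const uint8_t *p = buf;

            printf("%llu\n", (unsigned long long)uleb128(&p));  /* 624485 */
            return 0;
    }

The dispatch itself is the one-liner in unw_decode() above: `code >> 5` indexes the handler table, so format selection costs a single table load per descriptor.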
diff --git a/xen/arch/ia64/linux-xen/unwind_i.h b/xen/arch/ia64/linux-xen/unwind_i.h
deleted file mode 100644
index 96693a6ae3..0000000000
--- a/xen/arch/ia64/linux-xen/unwind_i.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Kernel unwind support.
- */
-
-#define UNW_VER(x) ((x) >> 48)
-#define UNW_FLAG_MASK 0x0000ffff00000000
-#define UNW_FLAG_OSMASK 0x0000f00000000000
-#define UNW_FLAG_EHANDLER(x) ((x) & 0x0000000100000000L)
-#define UNW_FLAG_UHANDLER(x) ((x) & 0x0000000200000000L)
-#define UNW_LENGTH(x) ((x) & 0x00000000ffffffffL)
-
-enum unw_register_index {
- /* primary unat: */
- UNW_REG_PRI_UNAT_GR,
- UNW_REG_PRI_UNAT_MEM,
-
- /* register stack */
- UNW_REG_BSP, /* register stack pointer */
- UNW_REG_BSPSTORE,
- UNW_REG_PFS, /* previous function state */
- UNW_REG_RNAT,
- /* memory stack */
- UNW_REG_PSP, /* previous memory stack pointer */
- /* return pointer: */
- UNW_REG_RP,
-
- /* preserved registers: */
- UNW_REG_R4, UNW_REG_R5, UNW_REG_R6, UNW_REG_R7,
- UNW_REG_UNAT, UNW_REG_PR, UNW_REG_LC, UNW_REG_FPSR,
- UNW_REG_B1, UNW_REG_B2, UNW_REG_B3, UNW_REG_B4, UNW_REG_B5,
- UNW_REG_F2, UNW_REG_F3, UNW_REG_F4, UNW_REG_F5,
- UNW_REG_F16, UNW_REG_F17, UNW_REG_F18, UNW_REG_F19,
- UNW_REG_F20, UNW_REG_F21, UNW_REG_F22, UNW_REG_F23,
- UNW_REG_F24, UNW_REG_F25, UNW_REG_F26, UNW_REG_F27,
- UNW_REG_F28, UNW_REG_F29, UNW_REG_F30, UNW_REG_F31,
- UNW_NUM_REGS
-};
-
-struct unw_info_block {
- u64 header;
- u64 desc[0]; /* unwind descriptors */
- /* personality routine and language-specific data follow behind descriptors */
-};
-
-struct unw_table {
- struct unw_table *next; /* must be first member! */
- const char *name;
- unsigned long gp; /* global pointer for this load-module */
- unsigned long segment_base; /* base for offsets in the unwind table entries */
- unsigned long start;
- unsigned long end;
- const struct unw_table_entry *array;
- unsigned long length;
-};
-
-enum unw_where {
- UNW_WHERE_NONE, /* register isn't saved at all */
- UNW_WHERE_GR, /* register is saved in a general register */
- UNW_WHERE_FR, /* register is saved in a floating-point register */
- UNW_WHERE_BR, /* register is saved in a branch register */
- UNW_WHERE_SPREL, /* register is saved on memstack (sp-relative) */
- UNW_WHERE_PSPREL, /* register is saved on memstack (psp-relative) */
- /*
- * At the end of each prologue these locations get resolved to
- * UNW_WHERE_PSPREL and UNW_WHERE_GR, respectively:
- */
- UNW_WHERE_SPILL_HOME, /* register is saved in its spill home */
- UNW_WHERE_GR_SAVE /* register is saved in next general register */
-};
-
-#define UNW_WHEN_NEVER 0x7fffffff
-
-struct unw_reg_info {
- unsigned long val; /* save location: register number or offset */
- enum unw_where where; /* where the register gets saved */
- int when; /* when the register gets saved */
-};
-
-struct unw_reg_state {
- struct unw_reg_state *next; /* next (outer) element on state stack */
- struct unw_reg_info reg[UNW_NUM_REGS]; /* register save locations */
-};
-
-struct unw_labeled_state {
- struct unw_labeled_state *next; /* next labeled state (or NULL) */
- unsigned long label; /* label for this state */
- struct unw_reg_state saved_state;
-};
-
-struct unw_state_record {
- unsigned int first_region : 1; /* is this the first region? */
- unsigned int done : 1; /* are we done scanning descriptors? */
- unsigned int any_spills : 1; /* got any register spills? */
- unsigned int in_body : 1; /* are we inside a body (as opposed to a prologue)? */
- unsigned long flags; /* see UNW_FLAG_* in unwind.h */
-
- u8 *imask; /* imask of spill_mask record or NULL */
- unsigned long pr_val; /* predicate values */
- unsigned long pr_mask; /* predicate mask */
- long spill_offset; /* psp-relative offset for spill base */
- int region_start;
- int region_len;
- int epilogue_start;
- int epilogue_count;
- int when_target;
-
- u8 gr_save_loc; /* next general register to use for saving a register */
- u8 return_link_reg; /* branch register in which the return link is passed */
-
- struct unw_labeled_state *labeled_states; /* list of all labeled states */
- struct unw_reg_state curr; /* current state */
-};
-
-enum unw_nat_type {
- UNW_NAT_NONE, /* NaT not represented */
- UNW_NAT_VAL, /* NaT represented by NaT value (fp reg) */
- UNW_NAT_MEMSTK, /* NaT value is in unat word at offset OFF */
- UNW_NAT_REGSTK /* NaT is in rnat */
-};
-
-enum unw_insn_opcode {
- UNW_INSN_ADD, /* s[dst] += val */
- UNW_INSN_ADD_PSP, /* s[dst] = (s.psp + val) */
- UNW_INSN_ADD_SP, /* s[dst] = (s.sp + val) */
- UNW_INSN_MOVE, /* s[dst] = s[val] */
- UNW_INSN_MOVE2, /* s[dst] = s[val]; s[dst+1] = s[val+1] */
- UNW_INSN_MOVE_STACKED, /* s[dst] = ia64_rse_skip(*s.bsp, val) */
- UNW_INSN_SETNAT_MEMSTK, /* s[dst+1].nat.type = MEMSTK;
- s[dst+1].nat.off = *s.pri_unat - s[dst] */
- UNW_INSN_SETNAT_TYPE, /* s[dst+1].nat.type = val */
- UNW_INSN_LOAD, /* s[dst] = *s[val] */
- UNW_INSN_MOVE_SCRATCH, /* s[dst] = scratch reg "val" */
- UNW_INSN_MOVE_CONST, /* s[dst] = constant reg "val" */
-};
-
-struct unw_insn {
- unsigned int opc : 4;
- unsigned int dst : 9;
- signed int val : 19;
-};
-
-/*
- * Preserved general static registers (r4-r7) give rise to two script
- * instructions; everything else yields at most one instruction; at
- * the end of the script, the psp gets popped, accounting for one more
- * instruction.
- */
-#define UNW_MAX_SCRIPT_LEN (UNW_NUM_REGS + 5)
-
-struct unw_script {
- unsigned long ip; /* ip this script is for */
- unsigned long pr_mask; /* mask of predicates script depends on */
- unsigned long pr_val; /* predicate values this script is for */
- rwlock_t lock;
- unsigned int flags; /* see UNW_FLAG_* in unwind.h */
- unsigned short lru_chain; /* used for least-recently-used chain */
- unsigned short coll_chain; /* used for hash collisions */
- unsigned short hint; /* hint for next script to try (or -1) */
- unsigned short count; /* number of instructions in script */
- struct unw_insn insn[UNW_MAX_SCRIPT_LEN];
-};
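
For orientation, the UNW_VER/UNW_FLAG_*/UNW_LENGTH macros near the top of this header slice up the 64-bit header word of an unwind info block: version in bits 63-48, flags in bits 47-32, and the length of the descriptor area (counted in 8-byte words, per the ia64 runtime architecture) in bits 31-0. A small sketch unpacking a hand-built header, using only the masks defined above:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* version 1, ehandler flag set, 5 eight-byte descriptor words */
            uint64_t header = ((uint64_t)1 << 48)
                            | ((uint64_t)1 << 32)
                            | 5;

            printf("ver=%llu ehandler=%d len=%llu\n",
                   (unsigned long long)(header >> 48),            /* UNW_VER */
                   (header & 0x0000000100000000ULL) != 0,         /* UNW_FLAG_EHANDLER */
                   (unsigned long long)(header & 0xffffffffULL)); /* UNW_LENGTH */
            return 0;
    }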
diff --git a/xen/arch/ia64/linux/Makefile b/xen/arch/ia64/linux/Makefile
deleted file mode 100644
index 6a3704b1bf..0000000000
--- a/xen/arch/ia64/linux/Makefile
+++ /dev/null
@@ -1,57 +0,0 @@
-subdir-y += dig
-subdir-y += hp
-subdir-y += sn
-
-obj-y += bitop.o
-obj-y += clear_page.o
-obj-y += clear_user.o
-obj-y += copy_page_mck.o
-obj-y += efi_stub.o
-obj-y += extable.o
-obj-y += flush.o
-obj-y += hpsim.o
-obj-y += linuxextable.o
-obj-y += machvec.o
-obj-y += memcpy_mck.o
-obj-y += memset.o
-obj-y += numa.o
-obj-y += pal.o
-obj-y += strlen.o
-
-obj-y += __divsi3.o
-obj-y += __udivsi3.o
-obj-y += __modsi3.o
-obj-y += __umodsi3.o
-obj-y += __divdi3.o
-obj-y += __udivdi3.o
-obj-y += __moddi3.o
-obj-y += __umoddi3.o
-obj-y += carta_random.o
-obj-y += io.o
-
-## variants of divide/modulo
-## see files in xen/arch/ia64/linux/lib (linux/arch/ia64/lib)
-__divdi3.o: idiv64.S
- $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
-
-__udivdi3.o: idiv64.S
-	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DUNSIGNED -c -o $@ $<
-
-__moddi3.o: idiv64.S
-	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DMODULO -c -o $@ $<
-
-__umoddi3.o: idiv64.S
-	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DMODULO -DUNSIGNED -c -o $@ $<
-
-__divsi3.o: idiv32.S
- $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
-
-__udivsi3.o: idiv32.S
-	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DUNSIGNED -c -o $@ $<
-
-__modsi3.o: idiv32.S
-	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DMODULO -c -o $@ $<
-
-__umodsi3.o: idiv32.S
-	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -DMODULO -DUNSIGNED -c -o $@ $<
-
diff --git a/xen/arch/ia64/linux/README.origin b/xen/arch/ia64/linux/README.origin
deleted file mode 100644
index 3727fe59d2..0000000000
--- a/xen/arch/ia64/linux/README.origin
+++ /dev/null
@@ -1,34 +0,0 @@
-Source files in this directory are identical copies of linux-2.6.13 files:
-
-NOTE: DO NOT commit changes to these files! If a file
-needs to be changed, move it to ../linux-xen and follow
-the instructions in the README there.
-
-efi_stub.S -> linux/arch/ia64/kernel/efi_stub.S
-extable.c -> linux/arch/ia64/mm/extable.c
-hpsim.S -> linux/arch/ia64/hp/sim/hpsim.S
-linuxextable.c -> linux/kernel/extable.c
-machvec.c -> linux/arch/ia64/kernel/machvec.c
-numa.c -> linux/arch/ia64/mm/numa.c
-patch.c -> linux/arch/ia64/kernel/patch.c
-pcdp.h -> linux/drivers/firmware/pcdp.h
-
-bitop.c -> linux/arch/ia64/lib/bitop.c
-clear_page.S -> linux/arch/ia64/lib/clear_page.S
-clear_user.S -> linux/arch/ia64/lib/clear_user.S
-copy_page_mck.S -> linux/arch/ia64/lib/copy_page_mck.S
-flush.S -> linux/arch/ia64/lib/flush.S
-idiv32.S -> linux/arch/ia64/lib/idiv32.S
-idiv64.S -> linux/arch/ia64/lib/idiv64.S
-memcpy_mck.S -> linux/arch/ia64/lib/memcpy_mck.S
-memset.S -> linux/arch/ia64/lib/memset.S
-strlen.S -> linux/arch/ia64/lib/strlen.S
-
-# The files below are from Linux-2.6.16.33
-carta_random.S -> linux/arch/ia64/lib/carta_random.S
-
-# The files below are from Linux-2.6.19
-io.c -> linux/arch/ia64/lib/io.c
-
-# The files below are from Linux-2.6.21
-pal.S -> linux/arch/ia64/kernel/pal.S
diff --git a/xen/arch/ia64/linux/bitop.c b/xen/arch/ia64/linux/bitop.c
deleted file mode 100644
index 82e299c846..0000000000
--- a/xen/arch/ia64/linux/bitop.c
+++ /dev/null
@@ -1,88 +0,0 @@
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/intrinsics.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-
-/*
- * Find next zero bit in a bitmap reasonably efficiently..
- */
-
-int __find_next_zero_bit (const void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (64-offset);
- if (size < 64)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while (size & ~63UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if (!size)
- return result;
- tmp = *p;
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* any bits zero? */
- return result + size; /* nope */
-found_middle:
- return result + ffz(tmp);
-}
-EXPORT_SYMBOL(__find_next_zero_bit);
-
-/*
- * Find next bit in a bitmap reasonably efficiently..
- */
-int __find_next_bit(const void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if (offset) {
- tmp = *(p++);
- tmp &= ~0UL << offset;
- if (size < 64)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while (size & ~63UL) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if (!size)
- return result;
- tmp = *p;
-found_first:
-	tmp &= ~0UL >> (64-size);
-	if (tmp == 0UL)		/* Are any bits set? */
-		return result + size;	/* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-EXPORT_SYMBOL(__find_next_bit);
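
Both functions above follow the same word-at-a-time plan: mask off the bits below the starting offset, skip whole 64-bit words that cannot match, then locate the answer in the first interesting word with ffz()/__ffs(). A portable sketch of the zero-bit search, with GCC's __builtin_ctzll standing in for the ia64 ffz() intrinsic:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned long find_next_zero_bit64(const uint64_t *map,
                                              unsigned long nbits,
                                              unsigned long off)
    {
            while (off < nbits) {
                    /* pretend the bits below 'off' are set so they can't match */
                    uint64_t w = map[off >> 6] | ((1ULL << (off & 63)) - 1);

                    if (~w) {       /* some zero bit in this word */
                            unsigned long bit = (off & ~63UL) + __builtin_ctzll(~w);
                            return bit < nbits ? bit : nbits;
                    }
                    off = (off & ~63UL) + 64;       /* all ones: skip the word */
            }
            return nbits;
    }

    int main(void)
    {
            uint64_t map[4] = { ~0ULL, ~0ULL, 0x00ffffffULL, 0 };

            printf("%lu\n", find_next_zero_bit64(map, 256, 0));     /* 152 */
            printf("%lu\n", find_next_zero_bit64(map, 256, 200));   /* 200 */
            return 0;
    }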
diff --git a/xen/arch/ia64/linux/carta_random.S b/xen/arch/ia64/linux/carta_random.S
deleted file mode 100644
index d0674c3603..0000000000
--- a/xen/arch/ia64/linux/carta_random.S
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Fast, simple, yet decent quality random number generator based on
- * a paper by David G. Carta ("Two Fast Implementations of the
- * `Minimal Standard' Random Number Generator," Communications of the
- * ACM, January, 1990).
- *
- * Copyright (C) 2002 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <asm/asmmacro.h>
-
-#define a r2
-#define m r3
-#define lo r8
-#define hi r9
-#define t0 r16
-#define t1 r17
-#define seed r32
-
-GLOBAL_ENTRY(carta_random32)
- movl a = (16807 << 16) | 16807
- ;;
- pmpyshr2.u t0 = a, seed, 0
- pmpyshr2.u t1 = a, seed, 16
- ;;
- unpack2.l t0 = t1, t0
- dep m = -1, r0, 0, 31
- ;;
- zxt4 lo = t0
- shr.u hi = t0, 32
- ;;
- dep t0 = 0, hi, 15, 49 // t0 = (hi & 0x7fff)
- ;;
- shl t0 = t0, 16 // t0 = (hi & 0x7fff) << 16
- shr t1 = hi, 15 // t1 = (hi >> 15)
- ;;
- add lo = lo, t0
- ;;
- cmp.gtu p6, p0 = lo, m
- ;;
-(p6) and lo = lo, m
- ;;
-(p6) add lo = 1, lo
- ;;
- add lo = lo, t1
- ;;
- cmp.gtu p6, p0 = lo, m
- ;;
-(p6) and lo = lo, m
- ;;
-(p6) add lo = 1, lo
- br.ret.sptk.many rp
-END(carta_random32)
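
The routine above is Park and Miller's "minimal standard" generator, x' = 16807*x mod (2^31 - 1), computed with Carta's trick of replacing the modulo by a split-and-fold. A scalar C sketch of the idea follows; the assembly uses an equivalent but differently arranged split suited to ia64's 16-bit parallel multiplies, and the printed check value is the well-known Park-Miller self-test:

    #include <stdint.h>
    #include <stdio.h>

    #define A 16807u
    #define M 0x7fffffffu           /* 2^31 - 1, prime */

    static uint32_t carta_next(uint32_t x)
    {
            /* p = A*x = hi*2^31 + lo, and 2^31 == 1 (mod M), so
             * p mod M == (hi + lo) mod M: one conditional fold, no divide.
             * (r == M can't occur for valid seeds, since M is prime.) */
            uint64_t p  = (uint64_t)A * x;
            uint32_t lo = p & M;
            uint32_t hi = (uint32_t)(p >> 31);
            uint32_t r  = lo + hi;

            return r > M ? r - M : r;
    }

    int main(void)
    {
            uint32_t x = 1;

            for (int i = 0; i < 10000; i++)
                    x = carta_next(x);
            printf("%u\n", x);      /* 1043618065, the published check value */
            return 0;
    }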
diff --git a/xen/arch/ia64/linux/clear_page.S b/xen/arch/ia64/linux/clear_page.S
deleted file mode 100644
index d4987061dd..0000000000
--- a/xen/arch/ia64/linux/clear_page.S
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 1999-2002 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
- *
- * 1/06/01 davidm Tuned for Itanium.
- * 2/12/02 kchen Tuned for both Itanium and McKinley
- * 3/08/02 davidm Some more tweaking
- */
-#include <linux/config.h>
-
-#include <asm/asmmacro.h>
-#include <asm/page.h>
-
-#ifdef CONFIG_ITANIUM
-# define L3_LINE_SIZE 64 // Itanium L3 line size
-# define PREFETCH_LINES 9 // magic number
-#else
-# define L3_LINE_SIZE 128 // McKinley L3 line size
-# define PREFETCH_LINES 12 // magic number
-#endif
-
-#define saved_lc r2
-#define dst_fetch r3
-#define dst1 r8
-#define dst2 r9
-#define dst3 r10
-#define dst4 r11
-
-#define dst_last r31
-
-GLOBAL_ENTRY(clear_page)
- .prologue
- .regstk 1,0,0,0
- mov r16 = PAGE_SIZE/L3_LINE_SIZE-1 // main loop count, -1=repeat/until
- .save ar.lc, saved_lc
- mov saved_lc = ar.lc
-
- .body
- mov ar.lc = (PREFETCH_LINES - 1)
- mov dst_fetch = in0
- adds dst1 = 16, in0
- adds dst2 = 32, in0
- ;;
-.fetch: stf.spill.nta [dst_fetch] = f0, L3_LINE_SIZE
- adds dst3 = 48, in0 // executing this multiple times is harmless
- br.cloop.sptk.few .fetch
- ;;
- addl dst_last = (PAGE_SIZE - PREFETCH_LINES*L3_LINE_SIZE), dst_fetch
- mov ar.lc = r16 // one L3 line per iteration
- adds dst4 = 64, in0
- ;;
-#ifdef CONFIG_ITANIUM
- // Optimized for Itanium
-1: stf.spill.nta [dst1] = f0, 64
- stf.spill.nta [dst2] = f0, 64
- cmp.lt p8,p0=dst_fetch, dst_last
- ;;
-#else
- // Optimized for McKinley
-1: stf.spill.nta [dst1] = f0, 64
- stf.spill.nta [dst2] = f0, 64
- stf.spill.nta [dst3] = f0, 64
- stf.spill.nta [dst4] = f0, 128
- cmp.lt p8,p0=dst_fetch, dst_last
- ;;
- stf.spill.nta [dst1] = f0, 64
- stf.spill.nta [dst2] = f0, 64
-#endif
- stf.spill.nta [dst3] = f0, 64
-(p8) stf.spill.nta [dst_fetch] = f0, L3_LINE_SIZE
- br.cloop.sptk.few 1b
- ;;
- mov ar.lc = saved_lc // restore lc
- br.ret.sptk.many rp
-END(clear_page)
diff --git a/xen/arch/ia64/linux/clear_user.S b/xen/arch/ia64/linux/clear_user.S
deleted file mode 100644
index eecd8577b2..0000000000
--- a/xen/arch/ia64/linux/clear_user.S
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * This routine clears to zero a linear memory buffer in user space.
- *
- * Inputs:
- * in0: address of buffer
- * in1: length of buffer in bytes
- * Outputs:
- * r8: number of bytes that didn't get cleared due to a fault
- *
- * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-
-#include <asm/asmmacro.h>
-
-//
-// arguments
-//
-#define buf r32
-#define len r33
-
-//
-// local registers
-//
-#define cnt r16
-#define buf2 r17
-#define saved_lc r18
-#define saved_pfs r19
-#define tmp r20
-#define len2 r21
-#define len3 r22
-
-//
-// Theory of operations:
-//	- we check whether or not the buffer is small, i.e., smaller than
-//	  17 bytes, in which case we do the byte-by-byte loop.
-//
-//	- Otherwise we go progressively from 1-byte stores up to an 8-byte
-//	  store in the head part; the body is a 16-byte store loop, and we
-//	  finish with the tail for the last 15 bytes.
-//	  The good point about this breakdown is that the long-buffer handling
-//	  contains only 2 branches.
-//
-//	The reason for not using shifting & masking for both the head and the
-//	tail is to stay semantically correct. This routine is not supposed
-//	to write bytes outside of the buffer. While most of the time this would
-//	be ok, we can't tolerate a mistake. A classical example is the case
-//	of multithreaded code where the extra bytes touched are actually owned
-//	by another thread which runs concurrently with ours. Another, less likely,
-//	example is with device drivers where reading an I/O mapped location may
-//	have side effects (same thing for writing).
-//
-
-GLOBAL_ENTRY(__do_clear_user)
- .prologue
- .save ar.pfs, saved_pfs
- alloc saved_pfs=ar.pfs,2,0,0,0
- cmp.eq p6,p0=r0,len // check for zero length
- .save ar.lc, saved_lc
- mov saved_lc=ar.lc // preserve ar.lc (slow)
- .body
- ;; // avoid WAW on CFM
- adds tmp=-1,len // br.ctop is repeat/until
- mov ret0=len // return value is length at this point
-(p6) br.ret.spnt.many rp
- ;;
- cmp.lt p6,p0=16,len // if len > 16 then long memset
- mov ar.lc=tmp // initialize lc for small count
-(p6) br.cond.dptk .long_do_clear
- ;; // WAR on ar.lc
- //
- // worst case 16 iterations, avg 8 iterations
- //
-	// We could have played with the predicates to use the extra
-	// M slot for 2 stores/iteration, but the cost of initializing
-	// the various counters, compared to how long the loop is supposed
-	// to last on average, does not make this solution viable.
- //
-1:
- EX( .Lexit1, st1 [buf]=r0,1 )
- adds len=-1,len // countdown length using len
- br.cloop.dptk 1b
- ;; // avoid RAW on ar.lc
- //
-	// .Lexit1: comes from the byte-by-byte loop
-	//	len contains bytes left
-.Lexit1:
- mov ret0=len // faster than using ar.lc
- mov ar.lc=saved_lc
- br.ret.sptk.many rp // end of short clear_user
-
-
- //
- // At this point we know we have more than 16 bytes to copy
- // so we focus on alignment (no branches required)
- //
- // The use of len/len2 for countdown of the number of bytes left
- // instead of ret0 is due to the fact that the exception code
- // changes the values of r8.
- //
-.long_do_clear:
- tbit.nz p6,p0=buf,0 // odd alignment (for long_do_clear)
- ;;
- EX( .Lexit3, (p6) st1 [buf]=r0,1 ) // 1-byte aligned
-(p6) adds len=-1,len;; // sync because buf is modified
- tbit.nz p6,p0=buf,1
- ;;
- EX( .Lexit3, (p6) st2 [buf]=r0,2 ) // 2-byte aligned
-(p6) adds len=-2,len;;
- tbit.nz p6,p0=buf,2
- ;;
- EX( .Lexit3, (p6) st4 [buf]=r0,4 ) // 4-byte aligned
-(p6) adds len=-4,len;;
- tbit.nz p6,p0=buf,3
- ;;
- EX( .Lexit3, (p6) st8 [buf]=r0,8 ) // 8-byte aligned
-(p6) adds len=-8,len;;
- shr.u cnt=len,4 // number of 128-bit (2x64bit) words
- ;;
- cmp.eq p6,p0=r0,cnt
- adds tmp=-1,cnt
-(p6) br.cond.dpnt .dotail // we have less than 16 bytes left
- ;;
- adds buf2=8,buf // setup second base pointer
- mov ar.lc=tmp
- ;;
-
- //
- // 16bytes/iteration core loop
- //
- // The second store can never generate a fault because
- // we come into the loop only when we are 16-byte aligned.
- // This means that if we cross a page then it will always be
- // in the first store and never in the second.
- //
- //
-	// We need to keep track of the remaining length. A possible (optimistic)
-	// way would be to use ar.lc and derive how many bytes were left by
-	// doing: left = 16*ar.lc + 16. This would avoid the addition at
-	// every iteration.
-	// However, we need to keep the synchronization point. A template
-	// M;;MB does not exist, and thus we can keep the addition at no
-	// extra cycle cost (it uses a nop slot anyway). It also simplifies the
-	// (unlikely) error-recovery code.
- //
-
-2: EX(.Lexit3, st8 [buf]=r0,16 )
- ;; // needed to get len correct when error
- st8 [buf2]=r0,16
- adds len=-16,len
- br.cloop.dptk 2b
- ;;
- mov ar.lc=saved_lc
- //
- // tail correction based on len only
- //
- // We alternate the use of len3,len2 to allow parallelism and correct
- // error handling. We also reuse p6/p7 to return correct value.
- // The addition of len2/len3 does not cost anything more compared to
- // the regular memset as we had empty slots.
- //
-.dotail:
- mov len2=len // for parallelization of error handling
- mov len3=len
- tbit.nz p6,p0=len,3
- ;;
- EX( .Lexit2, (p6) st8 [buf]=r0,8 ) // at least 8 bytes
-(p6) adds len3=-8,len2
- tbit.nz p7,p6=len,2
- ;;
- EX( .Lexit2, (p7) st4 [buf]=r0,4 ) // at least 4 bytes
-(p7) adds len2=-4,len3
- tbit.nz p6,p7=len,1
- ;;
- EX( .Lexit2, (p6) st2 [buf]=r0,2 ) // at least 2 bytes
-(p6) adds len3=-2,len2
- tbit.nz p7,p6=len,0
- ;;
- EX( .Lexit2, (p7) st1 [buf]=r0 ) // only 1 byte left
- mov ret0=r0 // success
- br.ret.sptk.many rp // end of most likely path
-
- //
- // Outlined error handling code
- //
-
- //
-	// .Lexit3: comes from the core loop; needs to restore pr/lc
-	//	    len contains bytes left
-	//
-	//
-	// .Lexit2:
-	//	if p6 -> coming from st8 or st2 : len2 contains what's left
-	//	if p7 -> coming from st4 or st1 : len3 contains what's left
-	// We must restore lc/pr even though they might not have been used.
-.Lexit2:
- .pred.rel "mutex", p6, p7
-(p6) mov len=len2
-(p7) mov len=len3
- ;;
- //
-	// .Lexit3: comes from head, need not restore pr/lc
- // len contains bytes left
- //
-.Lexit3:
- mov ret0=len
- mov ar.lc=saved_lc
- br.ret.sptk.many rp
-END(__do_clear_user)
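
Stripped of the EX() fault-recovery plumbing, the head/body/tail plan described in the theory-of-operations comment above is easy to state in C. A minimal sketch (the aligned pointer casts assume the usual flat memory model; the real routine additionally reports how many bytes were left unwritten when a fault hits):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void clear_sketch(unsigned char *buf, size_t len)
    {
            if (len < 17) {                 /* small buffer: byte-by-byte loop */
                    while (len--)
                            *buf++ = 0;
                    return;
            }
            /* head: progressively align to 16 bytes (st1/st2/st4/st8) */
            if ((uintptr_t)buf & 1) { *buf = 0;             buf += 1; len -= 1; }
            if ((uintptr_t)buf & 2) { *(uint16_t *)buf = 0; buf += 2; len -= 2; }
            if ((uintptr_t)buf & 4) { *(uint32_t *)buf = 0; buf += 4; len -= 4; }
            if ((uintptr_t)buf & 8) { *(uint64_t *)buf = 0; buf += 8; len -= 8; }
            /* body: 16 bytes per iteration (two st8s in the assembly) */
            while (len >= 16) {
                    ((uint64_t *)buf)[0] = 0;
                    ((uint64_t *)buf)[1] = 0;
                    buf += 16; len -= 16;
            }
            /* tail: at most 15 bytes, by descending power of two */
            if (len & 8) { *(uint64_t *)buf = 0; buf += 8; }
            if (len & 4) { *(uint32_t *)buf = 0; buf += 4; }
            if (len & 2) { *(uint16_t *)buf = 0; buf += 2; }
            if (len & 1) *buf = 0;
    }

    int main(void)
    {
            unsigned char b[64];

            memset(b, 0xff, sizeof b);
            clear_sketch(b + 3, 37);        /* clears b[3]..b[39] only */
            printf("%d %d %d %d\n", b[2], b[3], b[39], b[40]);  /* 255 0 0 255 */
            return 0;
    }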
diff --git a/xen/arch/ia64/linux/copy_page_mck.S b/xen/arch/ia64/linux/copy_page_mck.S
deleted file mode 100644
index 3c45d60a81..0000000000
--- a/xen/arch/ia64/linux/copy_page_mck.S
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * McKinley-optimized version of copy_page().
- *
- * Copyright (C) 2002 Hewlett-Packard Co
- * David Mosberger <davidm@hpl.hp.com>
- *
- * Inputs:
- * in0: address of target page
- * in1: address of source page
- * Output:
- * no return value
- *
- * General idea:
- * - use regular loads and stores to prefetch data to avoid consuming M-slot just for
- * lfetches => good for in-cache performance
- * - avoid l2 bank-conflicts by not storing into the same 16-byte bank within a single
- * cycle
- *
- * Principle of operation:
- * First, note that L1 has a line-size of 64 bytes and L2 a line-size of 128 bytes.
- * To avoid secondary misses in L2, we prefetch both source and destination with a line-size
- * of 128 bytes. When both of these lines are in the L2 and the first half of the
- * source line is in L1, we start copying the remaining words. The second half of the
- * source line is prefetched in an earlier iteration, so that by the time we start
- * accessing it, it's also present in the L1.
- *
- * We use a software-pipelined loop to control the overall operation. The pipeline
- * has 2*PREFETCH_DIST+K stages. The first PREFETCH_DIST stages are used for prefetching
- * source cache-lines. The second PREFETCH_DIST stages are used for prefetching destination
- * cache-lines, the last K stages are used to copy the cache-line words not copied by
- * the prefetches. The four relevant points in the pipelined loop are called A, B, C, D:
- * p[A] is TRUE if a source-line should be prefetched, p[B] is TRUE if a destination-line
- * should be prefetched, p[C] is TRUE if the second half of an L2 line should be brought
- * into L1D and p[D] is TRUE if a cacheline needs to be copied.
- *
- * This all sounds very complicated, but thanks to the modulo-scheduled loop support,
- * the resulting code is very regular and quite easy to follow (once you get the idea).
- *
- * As a secondary optimization, the first 2*PREFETCH_DIST iterations are implemented
- * as the separate .prefetch_loop. Logically, this loop performs exactly like the
- * main-loop (.line_copy), but has all known-to-be-predicated-off instructions removed,
- * so that each loop iteration is faster (again, good for cached case).
- *
- * When reading the code, it helps to keep the following picture in mind:
- *
- * word 0 word 1
- * +------+------+---
- * | v[x] | t1 | ^
- * | t2 | t3 | |
- * | t4 | t5 | |
- * | t6 | t7 | | 128 bytes
- * | n[y] | t9 | | (L2 cache line)
- * | t10 | t11 | |
- * | t12 | t13 | |
- * | t14 | t15 | v
- * +------+------+---
- *
- * Here, v[x] is copied by the (memory) prefetch. n[y] is loaded at p[C]
- * to fetch the second-half of the L2 cache line into L1, and the tX words are copied in
- * an order that avoids bank conflicts.
- */
-#include <asm/asmmacro.h>
-#include <asm/page.h>
-
-#define PREFETCH_DIST 8 // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st)
-
-#define src0 r2
-#define src1 r3
-#define dst0 r9
-#define dst1 r10
-#define src_pre_mem r11
-#define dst_pre_mem r14
-#define src_pre_l2 r15
-#define dst_pre_l2 r16
-#define t1 r17
-#define t2 r18
-#define t3 r19
-#define t4 r20
-#define t5 t1 // alias!
-#define t6 t2 // alias!
-#define t7 t3 // alias!
-#define t9 t5 // alias!
-#define t10 t4 // alias!
-#define t11 t7 // alias!
-#define t12 t6 // alias!
-#define t14 t10 // alias!
-#define t13 r21
-#define t15 r22
-
-#define saved_lc r23
-#define saved_pr r24
-
-#define A 0
-#define B (PREFETCH_DIST)
-#define C (B + PREFETCH_DIST)
-#define D (C + 3)
-#define N (D + 1)
-#define Nrot ((N + 7) & ~7)
-
-GLOBAL_ENTRY(copy_page)
- .prologue
- alloc r8 = ar.pfs, 2, Nrot-2, 0, Nrot
-
- .rotr v[2*PREFETCH_DIST], n[D-C+1]
- .rotp p[N]
-
- .save ar.lc, saved_lc
- mov saved_lc = ar.lc
- .save pr, saved_pr
- mov saved_pr = pr
- .body
-
- mov src_pre_mem = in1
- mov pr.rot = 0x10000
- mov ar.ec = 1 // special unrolled loop
-
- mov dst_pre_mem = in0
- mov ar.lc = 2*PREFETCH_DIST - 1
-
- add src_pre_l2 = 8*8, in1
- add dst_pre_l2 = 8*8, in0
- add src0 = 8, in1 // first t1 src
- add src1 = 3*8, in1 // first t3 src
- add dst0 = 8, in0 // first t1 dst
- add dst1 = 3*8, in0 // first t3 dst
- mov t1 = (PAGE_SIZE/128) - (2*PREFETCH_DIST) - 1
- nop.m 0
- nop.i 0
- ;;
- // same as .line_copy loop, but with all predicated-off instructions removed:
-.prefetch_loop:
-(p[A]) ld8 v[A] = [src_pre_mem], 128 // M0
-(p[B]) st8 [dst_pre_mem] = v[B], 128 // M2
- br.ctop.sptk .prefetch_loop
- ;;
- cmp.eq p16, p0 = r0, r0 // reset p16 to 1 (br.ctop cleared it to zero)
- mov ar.lc = t1 // with 64KB pages, t1 is too big to fit in 8 bits!
- mov ar.ec = N // # of stages in pipeline
- ;;
-.line_copy:
-(p[D]) ld8 t2 = [src0], 3*8 // M0
-(p[D]) ld8 t4 = [src1], 3*8 // M1
-(p[B]) st8 [dst_pre_mem] = v[B], 128 // M2 prefetch dst from memory
-(p[D]) st8 [dst_pre_l2] = n[D-C], 128 // M3 prefetch dst from L2
- ;;
-(p[A]) ld8 v[A] = [src_pre_mem], 128 // M0 prefetch src from memory
-(p[C]) ld8 n[0] = [src_pre_l2], 128 // M1 prefetch src from L2
-(p[D]) st8 [dst0] = t1, 8 // M2
-(p[D]) st8 [dst1] = t3, 8 // M3
- ;;
-(p[D]) ld8 t5 = [src0], 8
-(p[D]) ld8 t7 = [src1], 3*8
-(p[D]) st8 [dst0] = t2, 3*8
-(p[D]) st8 [dst1] = t4, 3*8
- ;;
-(p[D]) ld8 t6 = [src0], 3*8
-(p[D]) ld8 t10 = [src1], 8
-(p[D]) st8 [dst0] = t5, 8
-(p[D]) st8 [dst1] = t7, 3*8
- ;;
-(p[D]) ld8 t9 = [src0], 3*8
-(p[D]) ld8 t11 = [src1], 3*8
-(p[D]) st8 [dst0] = t6, 3*8
-(p[D]) st8 [dst1] = t10, 8
- ;;
-(p[D]) ld8 t12 = [src0], 8
-(p[D]) ld8 t14 = [src1], 8
-(p[D]) st8 [dst0] = t9, 3*8
-(p[D]) st8 [dst1] = t11, 3*8
- ;;
-(p[D]) ld8 t13 = [src0], 4*8
-(p[D]) ld8 t15 = [src1], 4*8
-(p[D]) st8 [dst0] = t12, 8
-(p[D]) st8 [dst1] = t14, 8
- ;;
-(p[D-1])ld8 t1 = [src0], 8
-(p[D-1])ld8 t3 = [src1], 8
-(p[D]) st8 [dst0] = t13, 4*8
-(p[D]) st8 [dst1] = t15, 4*8
- br.ctop.sptk .line_copy
- ;;
- mov ar.lc = saved_lc
- mov pr = saved_pr, -1
- br.ret.sptk.many rp
-END(copy_page)
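
There is no C equivalent for the rotating registers and stage predicates that make the modulo-scheduled loop above work, but the prefetch-distance structure it implements can be sketched: while line i is being copied, the source line roughly 2*PREFETCH_DIST ahead and the destination line PREFETCH_DIST ahead are already on their way into the caches. A rough sketch, with GCC's __builtin_prefetch standing in for the ld8/st8-based prefetch streams:

    #include <stddef.h>
    #include <string.h>

    #define LINE 128                /* L2 line size, as in the code above */
    #define DIST 8                  /* PREFETCH_DIST */

    static void copy_page_sketch(void *dst, const void *src, size_t size)
    {
            size_t nlines = size / LINE;

            for (size_t i = 0; i < nlines; i++) {
                    if (i + 2 * DIST < nlines)      /* source runs furthest ahead */
                            __builtin_prefetch((const char *)src + (i + 2 * DIST) * LINE, 0);
                    if (i + DIST < nlines)          /* destination trails it */
                            __builtin_prefetch((char *)dst + (i + DIST) * LINE, 1);
                    memcpy((char *)dst + i * LINE, (const char *)src + i * LINE, LINE);
            }
    }

    int main(void)
    {
            static char a[16384], b[16384];

            memset(a, 7, sizeof a);
            copy_page_sketch(b, a, sizeof a);
            return b[16383] == 7 ? 0 : 1;
    }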
diff --git a/xen/arch/ia64/linux/dig/Makefile b/xen/arch/ia64/linux/dig/Makefile
deleted file mode 100644
index 38f6b9375d..0000000000
--- a/xen/arch/ia64/linux/dig/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += machvec.o
diff --git a/xen/arch/ia64/linux/dig/README.origin b/xen/arch/ia64/linux/dig/README.origin
deleted file mode 100644
index 0976d1dd60..0000000000
--- a/xen/arch/ia64/linux/dig/README.origin
+++ /dev/null
@@ -1,7 +0,0 @@
-Source files in this directory are identical copies of linux-2.6.19 files:
-
-NOTE: DO NOT commit changes to these files! If a file
-needs to be changed, move it to ../linux-xen and follow
-the instructions in the README there.
-
-machvec.c -> linux/arch/ia64/dig/machvec.c
diff --git a/xen/arch/ia64/linux/dig/machvec.c b/xen/arch/ia64/linux/dig/machvec.c
deleted file mode 100644
index 0c55bdafb4..0000000000
--- a/xen/arch/ia64/linux/dig/machvec.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#define MACHVEC_PLATFORM_NAME dig
-#define MACHVEC_PLATFORM_HEADER <asm/machvec_dig.h>
-#include <asm/machvec_init.h>
diff --git a/xen/arch/ia64/linux/efi_stub.S b/xen/arch/ia64/linux/efi_stub.S
deleted file mode 100644
index 5a7fe70212..0000000000
--- a/xen/arch/ia64/linux/efi_stub.S
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * EFI call stub.
- *
- * Copyright (C) 1999-2001 Hewlett-Packard Co
- * David Mosberger <davidm@hpl.hp.com>
- *
- * This stub allows us to make EFI calls in physical mode with interrupts
- * turned off. We need this because we can't call SetVirtualAddressMap() until
- * the kernel has booted far enough to allow allocation of struct vm_area_struct
- * entries (which we would need to map stuff with memory attributes other
- * than uncached or writeback...). Since the GetTime() service gets called
- * earlier than that, we need to be able to make physical mode EFI calls from
- * the kernel.
- */
-
-/*
- * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System
- * Abstraction Layer Specification", revision 2.6e). Note that
- * psr.dfl and psr.dfh MUST be cleared, despite what this manual says.
- * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call
- * (the br.ia instruction fails unless psr.dfl and psr.dfh are
- * cleared). Fortunately, SAL promises not to touch the floating
- * point regs, so at least we don't have to save f2-f127.
- */
-#define PSR_BITS_TO_CLEAR \
- (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
- IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
- IA64_PSR_DFL | IA64_PSR_DFH)
-
-#define PSR_BITS_TO_SET \
- (IA64_PSR_BN)
-
-#include <asm/processor.h>
-#include <asm/asmmacro.h>
-
-/*
- * Inputs:
- * in0 = address of function descriptor of EFI routine to call
- * in1..in7 = arguments to routine
- *
- * Outputs:
- * r8 = EFI_STATUS returned by called function
- */
-
-GLOBAL_ENTRY(efi_call_phys)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,7,7,0
- ld8 r2=[in0],8 // load EFI function's entry point
- mov loc0=rp
- .body
- ;;
- mov loc2=gp // save global pointer
- mov loc4=ar.rsc // save RSE configuration
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- ;;
- ld8 gp=[in0] // load EFI function's global pointer
- movl r16=PSR_BITS_TO_CLEAR
- mov loc3=psr // save processor status word
- movl r17=PSR_BITS_TO_SET
- ;;
- or loc3=loc3,r17
- mov b6=r2
- ;;
- andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
- br.call.sptk.many rp=ia64_switch_mode_phys
-.ret0: mov out4=in5
- mov out0=in1
- mov out1=in2
- mov out2=in3
- mov out3=in4
- mov out5=in6
- mov out6=in7
- mov loc5=r19
- mov loc6=r20
- br.call.sptk.many rp=b6 // call the EFI function
-.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- mov r16=loc3
- mov r19=loc5
- mov r20=loc6
- br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-.ret2: mov ar.rsc=loc4 // restore RSE configuration
- mov ar.pfs=loc1
- mov rp=loc0
- mov gp=loc2
- br.ret.sptk.many rp
-END(efi_call_phys)
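
One detail worth spelling out: on ia64, `in0` is not a code address but the address of a function descriptor, a two-word record holding the entry point and the callee's global pointer. That is why the stub does `ld8 r2=[in0],8` followed by `ld8 gp=[in0]`. In C terms (the addresses below are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* ia64 function descriptor: what a "function pointer" really points at */
    struct ia64_fdesc {
            uint64_t entry;         /* code address, moved into b6 for the call */
            uint64_t gp;            /* global pointer the callee expects */
    };

    int main(void)
    {
            /* made-up addresses, region-7 style */
            struct ia64_fdesc f = { 0xe000000000510000ull, 0xe000000000800000ull };

            /* the stub effectively does: b6 = f.entry; gp = f.gp; call b6 */
            printf("entry=%#llx gp=%#llx\n",
                   (unsigned long long)f.entry, (unsigned long long)f.gp);
            return 0;
    }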
diff --git a/xen/arch/ia64/linux/extable.c b/xen/arch/ia64/linux/extable.c
deleted file mode 100644
index 6d259e34f3..0000000000
--- a/xen/arch/ia64/linux/extable.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Kernel exception handling table support. Derived from arch/alpha/mm/extable.c.
- *
- * Copyright (C) 1998, 1999, 2001-2002, 2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/config.h>
-#include <linux/sort.h>
-
-#include <asm/uaccess.h>
-#include <asm/module.h>
-
-static int cmp_ex(const void *a, const void *b)
-{
- const struct exception_table_entry *l = a, *r = b;
- u64 lip = (u64) &l->addr + l->addr;
- u64 rip = (u64) &r->addr + r->addr;
-
- /* avoid overflow */
- if (lip > rip)
- return 1;
- if (lip < rip)
- return -1;
- return 0;
-}
-
-static void swap_ex(void *a, void *b, int size)
-{
- struct exception_table_entry *l = a, *r = b, tmp;
- u64 delta = (u64) r - (u64) l;
-
- tmp = *l;
- l->addr = r->addr + delta;
- l->cont = r->cont + delta;
- r->addr = tmp.addr - delta;
- r->cont = tmp.cont - delta;
-}
-
-/*
- * Sort the exception table. It's usually already sorted, but there
- * may be unordered entries due to multiple text sections (such as the
- * .init text section). Note that the exception-table-entries contain
- * location-relative addresses, which requires a bit of care during
- * sorting to avoid overflows in the offset members (e.g., it would
- * not be safe to make a temporary copy of an exception-table entry on
- * the stack, because the stack may be more than 2GB away from the
- * exception-table).
- */
-void sort_extable (struct exception_table_entry *start,
- struct exception_table_entry *finish)
-{
- sort(start, finish - start, sizeof(struct exception_table_entry),
- cmp_ex, swap_ex);
-}
-
-const struct exception_table_entry *
-search_extable (const struct exception_table_entry *first,
- const struct exception_table_entry *last,
- unsigned long ip)
-{
- const struct exception_table_entry *mid;
- unsigned long mid_ip;
- long diff;
-
- while (first <= last) {
- mid = &first[(last - first)/2];
- mid_ip = (u64) &mid->addr + mid->addr;
- diff = mid_ip - ip;
- if (diff == 0)
- return mid;
- else if (diff < 0)
- first = mid + 1;
- else
- last = mid - 1;
- }
- return NULL;
-}
-
-void
-ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e)
-{
- long fix = (u64) &e->cont + e->cont;
-
- regs->r8 = -EFAULT;
- if (fix & 4)
- regs->r9 = 0;
- regs->cr_iip = fix & ~0xf;
- ia64_psr(regs)->ri = fix & 0x3; /* set continuation slot number */
-}
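
The entries here use location-relative (self-relative) fields: each field stores `target - &field`, so resolving is `(u64)&field + field`, exactly as cmp_ex() and ia64_handle_exception() do above. That is also why swap_ex() adjusts each offset by the distance the entry moved, and why, per the comment, the sort must not stage an entry through a far-away temporary. A self-contained sketch of the encoding:

    #include <stdint.h>
    #include <stdio.h>

    struct selfrel { int32_t off; };    /* stores target - &off */

    static void set(struct selfrel *p, uintptr_t target)
    {
            /* only valid when target is within +/-2 GB of the field --
             * the same constraint the kernel comment mentions */
            p->off = (int32_t)(target - (uintptr_t)&p->off);
    }

    static uintptr_t resolve(const struct selfrel *p)
    {
            return (uintptr_t)&p->off + p->off;
    }

    int main(void)
    {
            static char code[16];
            static struct selfrel e;    /* kept near 'code', so the offset fits */

            set(&e, (uintptr_t)&code[4]);
            printf("%d\n", resolve(&e) == (uintptr_t)&code[4]);     /* 1 */
            return 0;
    }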
diff --git a/xen/arch/ia64/linux/flush.S b/xen/arch/ia64/linux/flush.S
deleted file mode 100644
index 3e2cfa2c6d..0000000000
--- a/xen/arch/ia64/linux/flush.S
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Cache flushing routines.
- *
- * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 05/28/05 Zoltan Menyhart Dynamic stride size
- */
-
-#include <asm/asmmacro.h>
-
-
- /*
- * flush_icache_range(start,end)
- *
- * Make i-cache(s) coherent with d-caches.
- *
- * Must deal with range from start to end-1 but nothing else (need to
- * be careful not to touch addresses that may be unmapped).
- *
- * Note: "in0" and "in1" are preserved for debugging purposes.
- */
-GLOBAL_ENTRY(flush_icache_range)
-
- .prologue
- alloc r2=ar.pfs,2,0,0,0
- movl r3=ia64_i_cache_stride_shift
- mov r21=1
- ;;
- ld8 r20=[r3] // r20: stride shift
- sub r22=in1,r0,1 // last byte address
- ;;
- shr.u r23=in0,r20 // start / (stride size)
- shr.u r22=r22,r20 // (last byte address) / (stride size)
- shl r21=r21,r20 // r21: stride size of the i-cache(s)
- ;;
- sub r8=r22,r23 // number of strides - 1
- shl r24=r23,r20 // r24: addresses for "fc.i" =
- // "start" rounded down to stride boundary
- .save ar.lc,r3
- mov r3=ar.lc // save ar.lc
- ;;
-
- .body
- mov ar.lc=r8
- ;;
- /*
- * 32 byte aligned loop, even number of (actually 2) bundles
- */
-.Loop: fc.i r24 // issuable on M0 only
- add r24=r21,r24 // we flush "stride size" bytes per iteration
- nop.i 0
- br.cloop.sptk.few .Loop
- ;;
- sync.i
- ;;
- srlz.i
- ;;
- mov ar.lc=r3 // restore ar.lc
- br.ret.sptk.many rp
-END(flush_icache_range)
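
The loop structure above, restated in C shape: round the start down to a stride boundary, issue one fc.i per stride-sized chunk up to and including the chunk holding end-1, then fence with sync.i/srlz.i. A sketch with the ia64-specific instructions left as comments; the stride shift is read from ia64_i_cache_stride_shift at boot, and 5 (32 bytes) is merely an assumed value here:

    #include <stdint.h>

    static unsigned long i_cache_stride_shift = 5;  /* assumed; set from PAL at boot */

    static void flush_icache_range_sketch(uintptr_t start, uintptr_t end)
    {
            unsigned long stride = 1UL << i_cache_stride_shift;
            uintptr_t addr = start & ~(stride - 1);         /* round down */
            uintptr_t last = (end - 1) & ~(stride - 1);     /* line of last byte */

            for (; addr <= last; addr += stride) {
                    /* here the real code issues: fc.i addr */
                    (void)addr;
            }
            /* sync.i ;; srlz.i -- order and serialize the flushes */
    }

    int main(void)
    {
            char buf[256];

            flush_icache_range_sketch((uintptr_t)buf, (uintptr_t)buf + sizeof buf);
            return 0;
    }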
diff --git a/xen/arch/ia64/linux/hp/Makefile b/xen/arch/ia64/linux/hp/Makefile
deleted file mode 100644
index fc712eb29b..0000000000
--- a/xen/arch/ia64/linux/hp/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y += zx1
diff --git a/xen/arch/ia64/linux/hp/zx1/Makefile b/xen/arch/ia64/linux/hp/zx1/Makefile
deleted file mode 100644
index c80bcd4ed7..0000000000
--- a/xen/arch/ia64/linux/hp/zx1/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += hpzx1_machvec.o
diff --git a/xen/arch/ia64/linux/hp/zx1/README.origin b/xen/arch/ia64/linux/hp/zx1/README.origin
deleted file mode 100644
index 598e743752..0000000000
--- a/xen/arch/ia64/linux/hp/zx1/README.origin
+++ /dev/null
@@ -1,7 +0,0 @@
-Source files in this directory are identical copies of linux-2.6.19 files:
-
-NOTE: DO NOT commit changes to these files! If a file
-needs to be changed, move it to ../linux-xen and follow
-the instructions in the README there.
-
-hpzx1_machvec.c -> linux/arch/ia64/hp/zx1/hpzx1_machvec.c
diff --git a/xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c b/xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c
deleted file mode 100644
index 32518b0f92..0000000000
--- a/xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#define MACHVEC_PLATFORM_NAME hpzx1
-#define MACHVEC_PLATFORM_HEADER <asm/machvec_hpzx1.h>
-#include <asm/machvec_init.h>
diff --git a/xen/arch/ia64/linux/hpsim.S b/xen/arch/ia64/linux/hpsim.S
deleted file mode 100644
index ff16e8a857..0000000000
--- a/xen/arch/ia64/linux/hpsim.S
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <asm/asmmacro.h>
-
-/*
- * Simulator system call.
- */
-GLOBAL_ENTRY(ia64_ssc)
- mov r15=r36
- break 0x80001
- br.ret.sptk.many rp
-END(ia64_ssc)
diff --git a/xen/arch/ia64/linux/idiv32.S b/xen/arch/ia64/linux/idiv32.S
deleted file mode 100644
index 2ac28bf0a6..0000000000
--- a/xen/arch/ia64/linux/idiv32.S
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2000 Hewlett-Packard Co
- * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 32-bit integer division.
- *
- * This code is based on the application note entitled "Divide, Square Root
- * and Remainder Algorithms for the IA-64 Architecture". This document
- * is available as Intel document number 248725-002 or via the web at
- * http://developer.intel.com/software/opensource/numerics/
- *
- * For more details on the theory behind these algorithms, see "IA-64
- * and Elementary Functions" by Peter Markstein; HP Professional Books
- * (http://www.hp.com/go/retailbooks/)
- */
-
-#include <asm/asmmacro.h>
-
-#ifdef MODULO
-# define OP mod
-#else
-# define OP div
-#endif
-
-#ifdef UNSIGNED
-# define SGN u
-# define EXTEND zxt4
-# define INT_TO_FP(a,b) fcvt.xuf.s1 a=b
-# define FP_TO_INT(a,b) fcvt.fxu.trunc.s1 a=b
-#else
-# define SGN
-# define EXTEND sxt4
-# define INT_TO_FP(a,b) fcvt.xf a=b
-# define FP_TO_INT(a,b) fcvt.fx.trunc.s1 a=b
-#endif
-
-#define PASTE1(a,b) a##b
-#define PASTE(a,b) PASTE1(a,b)
-#define NAME PASTE(PASTE(__,SGN),PASTE(OP,si3))
-
-GLOBAL_ENTRY(NAME)
- .regstk 2,0,0,0
- // Transfer inputs to FP registers.
- mov r2 = 0xffdd // r2 = -34 + 65535 (fp reg format bias)
- EXTEND in0 = in0 // in0 = a
- EXTEND in1 = in1 // in1 = b
- ;;
- setf.sig f8 = in0
- setf.sig f9 = in1
-#ifdef MODULO
- sub in1 = r0, in1 // in1 = -b
-#endif
- ;;
- // Convert the inputs to FP, to avoid FP software-assist faults.
- INT_TO_FP(f8, f8)
- INT_TO_FP(f9, f9)
- ;;
- setf.exp f7 = r2 // f7 = 2^-34
- frcpa.s1 f6, p6 = f8, f9 // y0 = frcpa(b)
- ;;
-(p6) fmpy.s1 f8 = f8, f6 // q0 = a*y0
-(p6) fnma.s1 f6 = f9, f6, f1 // e0 = -b*y0 + 1
- ;;
-#ifdef MODULO
- setf.sig f9 = in1 // f9 = -b
-#endif
-(p6) fma.s1 f8 = f6, f8, f8 // q1 = e0*q0 + q0
-(p6) fma.s1 f6 = f6, f6, f7 // e1 = e0*e0 + 2^-34
- ;;
-#ifdef MODULO
- setf.sig f7 = in0
-#endif
-(p6) fma.s1 f6 = f6, f8, f8 // q2 = e1*q1 + q1
- ;;
- FP_TO_INT(f6, f6) // q = trunc(q2)
- ;;
-#ifdef MODULO
- xma.l f6 = f6, f9, f7 // r = q*(-b) + a
- ;;
-#endif
- getf.sig r8 = f6 // transfer result to result register
- br.ret.sptk.many rp
-END(NAME)
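
The arithmetic above is Newton-Raphson refinement of frcpa's roughly 8-bit reciprocal guess: q0 = a*y0, e0 = 1 - b*y0, then q1 = q0 + e0*q0 and q2 = q1 + (e0^2 + 2^-34)*q1, with the final truncate giving the exact 32-bit quotient (the correctness bound is proved in the Intel application note cited in the header; the 64-bit variant that follows refines both q and y once more and adds a residual correction). A numeric sketch in C doubles, which mimics the dataflow but, having only a 53-bit significand instead of the 64-bit one the s1 status field provides, is not exact over the full input range:

    #include <math.h>
    #include <stdio.h>

    static double approx_recip(double b)    /* ~8 significant bits, like frcpa */
    {
            int e;
            double m = frexp(1.0 / b, &e);          /* 1/b = m * 2^e, m in [0.5,1) */

            return ldexp(floor(m * 256.0) / 256.0, e);
    }

    static unsigned sketch_udiv32(unsigned a, unsigned b)
    {
            double y0 = approx_recip(b);            /* frcpa.s1           */
            double q0 = a * y0;                     /* q0 = a*y0          */
            double e0 = 1.0 - b * y0;               /* e0 = -b*y0 + 1     */
            double q1 = e0 * q0 + q0;               /* q1 = e0*q0 + q0    */
            double e1 = e0 * e0 + 0x1p-34;          /* e1 = e0*e0 + 2^-34 */
            double q2 = e1 * q1 + q1;               /* q2 = e1*q1 + q1    */

            return (unsigned)q2;                    /* q = trunc(q2)      */
    }

    int main(void)
    {
            printf("%u %u\n", sketch_udiv32(1000000u, 7u), 1000000u / 7u);  /* 142857 142857 */
            return 0;
    }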
diff --git a/xen/arch/ia64/linux/idiv64.S b/xen/arch/ia64/linux/idiv64.S
deleted file mode 100644
index f69bd2b098..0000000000
--- a/xen/arch/ia64/linux/idiv64.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (C) 1999-2000 Hewlett-Packard Co
- * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 64-bit integer division.
- *
- * This code is based on the application note entitled "Divide, Square Root
- * and Remainder Algorithms for the IA-64 Architecture". This document
- * is available as Intel document number 248725-002 or via the web at
- * http://developer.intel.com/software/opensource/numerics/
- *
- * For more details on the theory behind these algorithms, see "IA-64
- * and Elementary Functions" by Peter Markstein; HP Professional Books
- * (http://www.hp.com/go/retailbooks/)
- */
-
-#include <asm/asmmacro.h>
-
-#ifdef MODULO
-# define OP mod
-#else
-# define OP div
-#endif
-
-#ifdef UNSIGNED
-# define SGN u
-# define INT_TO_FP(a,b) fcvt.xuf.s1 a=b
-# define FP_TO_INT(a,b) fcvt.fxu.trunc.s1 a=b
-#else
-# define SGN
-# define INT_TO_FP(a,b) fcvt.xf a=b
-# define FP_TO_INT(a,b) fcvt.fx.trunc.s1 a=b
-#endif
-
-#define PASTE1(a,b) a##b
-#define PASTE(a,b) PASTE1(a,b)
-#define NAME PASTE(PASTE(__,SGN),PASTE(OP,di3))
-
-GLOBAL_ENTRY(NAME)
- .regstk 2,0,0,0
- // Transfer inputs to FP registers.
- setf.sig f8 = in0
- setf.sig f9 = in1
- ;;
- // Convert the inputs to FP, to avoid FP software-assist faults.
- INT_TO_FP(f8, f8)
- INT_TO_FP(f9, f9)
- ;;
- frcpa.s1 f11, p6 = f8, f9 // y0 = frcpa(b)
- ;;
-(p6) fmpy.s1 f7 = f8, f11 // q0 = a*y0
-(p6) fnma.s1 f6 = f9, f11, f1 // e0 = -b*y0 + 1
- ;;
-(p6) fma.s1 f10 = f7, f6, f7 // q1 = q0*e0 + q0
-(p6) fmpy.s1 f7 = f6, f6 // e1 = e0*e0
- ;;
-#ifdef MODULO
- sub in1 = r0, in1 // in1 = -b
-#endif
-(p6) fma.s1 f10 = f10, f7, f10 // q2 = q1*e1 + q1
-(p6) fma.s1 f6 = f11, f6, f11 // y1 = y0*e0 + y0
- ;;
-(p6) fma.s1 f6 = f6, f7, f6 // y2 = y1*e1 + y1
-(p6) fnma.s1 f7 = f9, f10, f8 // r = -b*q2 + a
- ;;
-#ifdef MODULO
- setf.sig f8 = in0 // f8 = a
- setf.sig f9 = in1 // f9 = -b
-#endif
-(p6) fma.s1 f11 = f7, f6, f10 // q3 = r*y2 + q2
- ;;
- FP_TO_INT(f11, f11) // q = trunc(q3)
- ;;
-#ifdef MODULO
- xma.l f11 = f11, f9, f8 // r = q*(-b) + a
- ;;
-#endif
- getf.sig r8 = f11 // transfer result to result register
- br.ret.sptk.many rp
-END(NAME)
diff --git a/xen/arch/ia64/linux/io.c b/xen/arch/ia64/linux/io.c
deleted file mode 100644
index bcd16f8ad9..0000000000
--- a/xen/arch/ia64/linux/io.c
+++ /dev/null
@@ -1,164 +0,0 @@
-#include <linux/module.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-
-/*
- * Copy data from IO memory space to "real" memory space.
- * This needs to be optimized.
- */
-void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
-{
- char *dst = to;
-
- while (count) {
- count--;
- *dst++ = readb(from++);
- }
-}
-EXPORT_SYMBOL(memcpy_fromio);
-
-/*
- * Copy data from "real" memory space to IO memory space.
- * This needs to be optimized.
- */
-void memcpy_toio(volatile void __iomem *to, const void *from, long count)
-{
- const char *src = from;
-
- while (count) {
- count--;
- writeb(*src++, to++);
- }
-}
-EXPORT_SYMBOL(memcpy_toio);
-
-/*
- * "memset" on IO memory space.
- * This needs to be optimized.
- */
-void memset_io(volatile void __iomem *dst, int c, long count)
-{
- unsigned char ch = (char)(c & 0xff);
-
- while (count) {
- count--;
- writeb(ch, dst);
- dst++;
- }
-}
-EXPORT_SYMBOL(memset_io);
-
-#ifdef CONFIG_IA64_GENERIC
-
-#undef __ia64_inb
-#undef __ia64_inw
-#undef __ia64_inl
-#undef __ia64_outb
-#undef __ia64_outw
-#undef __ia64_outl
-#undef __ia64_readb
-#undef __ia64_readw
-#undef __ia64_readl
-#undef __ia64_readq
-#undef __ia64_readb_relaxed
-#undef __ia64_readw_relaxed
-#undef __ia64_readl_relaxed
-#undef __ia64_readq_relaxed
-#undef __ia64_writeb
-#undef __ia64_writew
-#undef __ia64_writel
-#undef __ia64_writeq
-#undef __ia64_mmiowb
-
-unsigned int
-__ia64_inb (unsigned long port)
-{
- return ___ia64_inb(port);
-}
-
-unsigned int
-__ia64_inw (unsigned long port)
-{
- return ___ia64_inw(port);
-}
-
-unsigned int
-__ia64_inl (unsigned long port)
-{
- return ___ia64_inl(port);
-}
-
-void
-__ia64_outb (unsigned char val, unsigned long port)
-{
- ___ia64_outb(val, port);
-}
-
-void
-__ia64_outw (unsigned short val, unsigned long port)
-{
- ___ia64_outw(val, port);
-}
-
-void
-__ia64_outl (unsigned int val, unsigned long port)
-{
- ___ia64_outl(val, port);
-}
-
-unsigned char
-__ia64_readb (void __iomem *addr)
-{
- return ___ia64_readb (addr);
-}
-
-unsigned short
-__ia64_readw (void __iomem *addr)
-{
- return ___ia64_readw (addr);
-}
-
-unsigned int
-__ia64_readl (void __iomem *addr)
-{
- return ___ia64_readl (addr);
-}
-
-unsigned long
-__ia64_readq (void __iomem *addr)
-{
- return ___ia64_readq (addr);
-}
-
-unsigned char
-__ia64_readb_relaxed (void __iomem *addr)
-{
- return ___ia64_readb (addr);
-}
-
-unsigned short
-__ia64_readw_relaxed (void __iomem *addr)
-{
- return ___ia64_readw (addr);
-}
-
-unsigned int
-__ia64_readl_relaxed (void __iomem *addr)
-{
- return ___ia64_readl (addr);
-}
-
-unsigned long
-__ia64_readq_relaxed (void __iomem *addr)
-{
- return ___ia64_readq (addr);
-}
-
-void
-__ia64_mmiowb(void)
-{
- ___ia64_mmiowb();
-}
-
-#endif /* CONFIG_IA64_GENERIC */
diff --git a/xen/arch/ia64/linux/linuxextable.c b/xen/arch/ia64/linux/linuxextable.c
deleted file mode 100644
index 98be4a49ad..0000000000
--- a/xen/arch/ia64/linux/linuxextable.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/* Rewritten by Rusty Russell, on the backs of many others...
- Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <asm/uaccess.h>
-#include <asm/sections.h>
-
-extern void *search_module_extables(unsigned long addr);
-extern void *__module_text_address(unsigned long addr);
-extern void *module_text_address(unsigned long addr);
-
-extern struct exception_table_entry __start___ex_table[];
-extern struct exception_table_entry __stop___ex_table[];
-
-/* Sort the kernel's built-in exception table */
-void __init sort_main_extable(void)
-{
- sort_extable(__start___ex_table, __stop___ex_table);
-}
-
-/* Given an address, look for it in the exception tables. */
-const struct exception_table_entry *search_exception_tables(unsigned long addr)
-{
- const struct exception_table_entry *e;
-
- e = search_extable(__start___ex_table, __stop___ex_table-1, addr);
- if (!e)
- e = search_module_extables(addr);
- return e;
-}
-
-static int core_kernel_text(unsigned long addr)
-{
- if (addr >= (unsigned long)_stext &&
- addr <= (unsigned long)_etext)
- return 1;
-
- if (addr >= (unsigned long)_sinittext &&
- addr <= (unsigned long)_einittext)
- return 1;
- return 0;
-}
-
-int __kernel_text_address(unsigned long addr)
-{
- if (core_kernel_text(addr))
- return 1;
- return __module_text_address(addr) != NULL;
-}
-
-int kernel_text_address(unsigned long addr)
-{
- if (core_kernel_text(addr))
- return 1;
- return module_text_address(addr) != NULL;
-}
diff --git a/xen/arch/ia64/linux/machvec.c b/xen/arch/ia64/linux/machvec.c
deleted file mode 100644
index c3a04ee7f4..0000000000
--- a/xen/arch/ia64/linux/machvec.c
+++ /dev/null
@@ -1,70 +0,0 @@
-#include <linux/config.h>
-#include <linux/module.h>
-
-#include <asm/machvec.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_IA64_GENERIC
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-#include <asm/page.h>
-
-struct ia64_machine_vector ia64_mv;
-EXPORT_SYMBOL(ia64_mv);
-
-static struct ia64_machine_vector *
-lookup_machvec (const char *name)
-{
- extern struct ia64_machine_vector machvec_start[];
- extern struct ia64_machine_vector machvec_end[];
- struct ia64_machine_vector *mv;
-
- for (mv = machvec_start; mv < machvec_end; ++mv)
- if (strcmp (mv->name, name) == 0)
- return mv;
-
- return 0;
-}
-
-void
-machvec_init (const char *name)
-{
- struct ia64_machine_vector *mv;
-
- mv = lookup_machvec(name);
- if (!mv) {
- panic("generic kernel failed to find machine vector for platform %s!", name);
- }
- ia64_mv = *mv;
- printk(KERN_INFO "booting generic kernel on platform %s\n", name);
-}
-
-#endif /* CONFIG_IA64_GENERIC */
-
-void
-machvec_setup (char **arg)
-{
-}
-EXPORT_SYMBOL(machvec_setup);
-
-void
-machvec_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
-{
-}
-EXPORT_SYMBOL(machvec_timer_interrupt);
-
-void
-machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir)
-{
- mb();
-}
-EXPORT_SYMBOL(machvec_dma_sync_single);
-
-void
-machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir)
-{
- mb();
-}
-EXPORT_SYMBOL(machvec_dma_sync_sg);
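
lookup_machvec() above is an instance of the linker-section registry pattern: every platform compiles one `struct ia64_machine_vector` into a dedicated section (that is all the three-line machvec.c files elsewhere in this series do), and a generic kernel walks the machvec_start..machvec_end bounds provided by the linker script. A sketch with a plain array standing in for the linker-provided section, and hypothetical setup hooks:

    #include <stdio.h>
    #include <string.h>

    struct machine_vector {
            const char *name;
            void (*setup)(void);
    };

    static void dig_setup(void)   { puts("setting up: dig");   }
    static void hpzx1_setup(void) { puts("setting up: hpzx1"); }

    /* in the kernel this array is assembled by the linker from per-platform
     * objects; machvec_start/machvec_end bound it */
    static struct machine_vector machvec_table[] = {
            { "dig",   dig_setup   },
            { "hpzx1", hpzx1_setup },
    };

    static struct machine_vector *lookup(const char *name)
    {
            for (size_t i = 0; i < sizeof machvec_table / sizeof *machvec_table; i++)
                    if (!strcmp(machvec_table[i].name, name))
                            return &machvec_table[i];
            return NULL;
    }

    int main(void)
    {
            struct machine_vector *mv = lookup("hpzx1");

            if (mv)
                    mv->setup();            /* prints "setting up: hpzx1" */
            return 0;
    }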
diff --git a/xen/arch/ia64/linux/memcpy_mck.S b/xen/arch/ia64/linux/memcpy_mck.S
deleted file mode 100644
index 6f308e62c1..0000000000
--- a/xen/arch/ia64/linux/memcpy_mck.S
+++ /dev/null
@@ -1,661 +0,0 @@
-/*
- * Itanium 2-optimized version of the memcpy and copy_user functions
- *
- * Inputs:
- * in0: destination address
- * in1: source address
- * in2: number of bytes to copy
- * Output:
- * 0 on success, or the number of bytes NOT copied if an error occurred.
- *
- * Copyright (C) 2002 Intel Corp.
- * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
- */
-#include <linux/config.h>
-#include <asm/asmmacro.h>
-#include <asm/page.h>
-
-#define EK(y...) EX(y)
-
-/* McKinley specific optimization */
-
-#define retval r8
-#define saved_pfs r31
-#define saved_lc r10
-#define saved_pr r11
-#define saved_in0 r14
-#define saved_in1 r15
-#define saved_in2 r16
-
-#define src0 r2
-#define src1 r3
-#define dst0 r17
-#define dst1 r18
-#define cnt r9
-
-/* r19-r30 are temp for each code section */
-#define PREFETCH_DIST 8
-#define src_pre_mem r19
-#define dst_pre_mem r20
-#define src_pre_l2 r21
-#define dst_pre_l2 r22
-#define t1 r23
-#define t2 r24
-#define t3 r25
-#define t4 r26
-#define t5 t1 // alias!
-#define t6 t2 // alias!
-#define t7 t3 // alias!
-#define n8 r27
-#define t9 t5 // alias!
-#define t10 t4 // alias!
-#define t11 t7 // alias!
-#define t12 t6 // alias!
-#define t14 t10 // alias!
-#define t13 r28
-#define t15 r29
-#define tmp r30
-
-/* defines for long_copy block */
-#define A 0
-#define B (PREFETCH_DIST)
-#define C (B + PREFETCH_DIST)
-#define D (C + 1)
-#define N (D + 1)
-#define Nrot ((N + 7) & ~7)
-
-/* alias */
-#define in0 r32
-#define in1 r33
-#define in2 r34
-
-GLOBAL_ENTRY(memcpy)
- and r28=0x7,in0
- and r29=0x7,in1
- mov f6=f0
- br.cond.sptk .common_code
- ;;
-END(memcpy)
-GLOBAL_ENTRY(__copy_user)
- .prologue
-// check dest alignment
- and r28=0x7,in0
- and r29=0x7,in1
- mov f6=f1
- mov saved_in0=in0 // save dest pointer
- mov saved_in1=in1 // save src pointer
- mov saved_in2=in2 // save len
- ;;
-.common_code:
- cmp.gt p15,p0=8,in2 // check for small size
- cmp.ne p13,p0=0,r28 // check dest alignment
- cmp.ne p14,p0=0,r29 // check src alignment
- add src0=0,in1
- sub r30=8,r28 // for .align_dest
- mov retval=r0 // initialize return value
- ;;
- add dst0=0,in0
- add dst1=1,in0 // dest odd index
- cmp.le p6,p0 = 1,r30 // for .align_dest
-(p15) br.cond.dpnt .memcpy_short
-(p13) br.cond.dpnt .align_dest
-(p14) br.cond.dpnt .unaligned_src
- ;;
-
-// both dest and src are aligned on 8-byte boundary
-.aligned_src:
- .save ar.pfs, saved_pfs
- alloc saved_pfs=ar.pfs,3,Nrot-3,0,Nrot
- .save pr, saved_pr
- mov saved_pr=pr
-
- shr.u cnt=in2,7 // number of cache lines
- ;;
- cmp.lt p6,p0=2*PREFETCH_DIST,cnt
- cmp.lt p7,p8=1,cnt
- .save ar.lc, saved_lc
- mov saved_lc=ar.lc
- .body
- add cnt=-1,cnt
- add src_pre_mem=0,in1 // prefetch src pointer
- add dst_pre_mem=0,in0 // prefetch dest pointer
- ;;
-(p7) mov ar.lc=cnt // prefetch count
-(p8) mov ar.lc=r0
-(p6) br.cond.dpnt .long_copy
- ;;
-
-.prefetch:
- lfetch.fault [src_pre_mem], 128
- lfetch.fault.excl [dst_pre_mem], 128
- br.cloop.dptk.few .prefetch
- ;;
-
-.medium_copy:
- and tmp=31,in2 // copy length after iteration
- shr.u r29=in2,5 // number of 32-byte iterations
- add dst1=8,dst0 // 2nd dest pointer
- ;;
- add cnt=-1,r29 // ctop iteration adjustment
- cmp.eq p10,p0=r29,r0 // do we really need to loop?
- add src1=8,src0 // 2nd src pointer
- cmp.le p6,p0=8,tmp
- ;;
- cmp.le p7,p0=16,tmp
- mov ar.lc=cnt // loop setup
- cmp.eq p16,p17 = r0,r0
- mov ar.ec=2
-(p10) br.dpnt.few .aligned_src_tail
- ;;
- TEXT_ALIGN(32)
-1:
-EX(.ex_handler, (p16) ld8 r34=[src0],16)
-EK(.ex_handler, (p16) ld8 r38=[src1],16)
-EX(.ex_handler, (p17) st8 [dst0]=r33,16)
-EK(.ex_handler, (p17) st8 [dst1]=r37,16)
- ;;
-EX(.ex_handler, (p16) ld8 r32=[src0],16)
-EK(.ex_handler, (p16) ld8 r36=[src1],16)
-EX(.ex_handler, (p16) st8 [dst0]=r34,16)
-EK(.ex_handler, (p16) st8 [dst1]=r38,16)
- br.ctop.dptk.few 1b
- ;;
-
-.aligned_src_tail:
-EX(.ex_handler, (p6) ld8 t1=[src0])
- mov ar.lc=saved_lc
- mov ar.pfs=saved_pfs
-EX(.ex_hndlr_s, (p7) ld8 t2=[src1],8)
- cmp.le p8,p0=24,tmp
- and r21=-8,tmp
- ;;
-EX(.ex_hndlr_s, (p8) ld8 t3=[src1])
-EX(.ex_handler, (p6) st8 [dst0]=t1) // store byte 1
- and in2=7,tmp // remaining length
-EX(.ex_hndlr_d, (p7) st8 [dst1]=t2,8) // store byte 2
- add src0=src0,r21 // setting up src pointer
- add dst0=dst0,r21 // setting up dest pointer
- ;;
-EX(.ex_handler, (p8) st8 [dst1]=t3) // store byte 3
- mov pr=saved_pr,-1
- br.dptk.many .memcpy_short
- ;;
-
-/* code taken from copy_page_mck */
-.long_copy:
- .rotr v[2*PREFETCH_DIST]
- .rotp p[N]
-
- mov src_pre_mem = src0
- mov pr.rot = 0x10000
- mov ar.ec = 1 // special unrolled loop
-
- mov dst_pre_mem = dst0
-
- add src_pre_l2 = 8*8, src0
- add dst_pre_l2 = 8*8, dst0
- ;;
- add src0 = 8, src_pre_mem // first t1 src
- mov ar.lc = 2*PREFETCH_DIST - 1
- shr.u cnt=in2,7 // number of lines
- add src1 = 3*8, src_pre_mem // first t3 src
- add dst0 = 8, dst_pre_mem // first t1 dst
- add dst1 = 3*8, dst_pre_mem // first t3 dst
- ;;
- and tmp=127,in2 // remaining bytes after this block
- add cnt = -(2*PREFETCH_DIST) - 1, cnt
- // same as .line_copy loop, but with all predicated-off instructions removed:
-.prefetch_loop:
-EX(.ex_hndlr_lcpy_1, (p[A]) ld8 v[A] = [src_pre_mem], 128) // M0
-EK(.ex_hndlr_lcpy_1, (p[B]) st8 [dst_pre_mem] = v[B], 128) // M2
- br.ctop.sptk .prefetch_loop
- ;;
- cmp.eq p16, p0 = r0, r0 // reset p16 to 1
- mov ar.lc = cnt
- mov ar.ec = N // # of stages in pipeline
- ;;
-.line_copy:
-EX(.ex_handler, (p[D]) ld8 t2 = [src0], 3*8) // M0
-EK(.ex_handler, (p[D]) ld8 t4 = [src1], 3*8) // M1
-EX(.ex_handler_lcpy, (p[B]) st8 [dst_pre_mem] = v[B], 128) // M2 prefetch dst from memory
-EK(.ex_handler_lcpy, (p[D]) st8 [dst_pre_l2] = n8, 128) // M3 prefetch dst from L2
- ;;
-EX(.ex_handler_lcpy, (p[A]) ld8 v[A] = [src_pre_mem], 128) // M0 prefetch src from memory
-EK(.ex_handler_lcpy, (p[C]) ld8 n8 = [src_pre_l2], 128) // M1 prefetch src from L2
-EX(.ex_handler, (p[D]) st8 [dst0] = t1, 8) // M2
-EK(.ex_handler, (p[D]) st8 [dst1] = t3, 8) // M3
- ;;
-EX(.ex_handler, (p[D]) ld8 t5 = [src0], 8)
-EK(.ex_handler, (p[D]) ld8 t7 = [src1], 3*8)
-EX(.ex_handler, (p[D]) st8 [dst0] = t2, 3*8)
-EK(.ex_handler, (p[D]) st8 [dst1] = t4, 3*8)
- ;;
-EX(.ex_handler, (p[D]) ld8 t6 = [src0], 3*8)
-EK(.ex_handler, (p[D]) ld8 t10 = [src1], 8)
-EX(.ex_handler, (p[D]) st8 [dst0] = t5, 8)
-EK(.ex_handler, (p[D]) st8 [dst1] = t7, 3*8)
- ;;
-EX(.ex_handler, (p[D]) ld8 t9 = [src0], 3*8)
-EK(.ex_handler, (p[D]) ld8 t11 = [src1], 3*8)
-EX(.ex_handler, (p[D]) st8 [dst0] = t6, 3*8)
-EK(.ex_handler, (p[D]) st8 [dst1] = t10, 8)
- ;;
-EX(.ex_handler, (p[D]) ld8 t12 = [src0], 8)
-EK(.ex_handler, (p[D]) ld8 t14 = [src1], 8)
-EX(.ex_handler, (p[D]) st8 [dst0] = t9, 3*8)
-EK(.ex_handler, (p[D]) st8 [dst1] = t11, 3*8)
- ;;
-EX(.ex_handler, (p[D]) ld8 t13 = [src0], 4*8)
-EK(.ex_handler, (p[D]) ld8 t15 = [src1], 4*8)
-EX(.ex_handler, (p[D]) st8 [dst0] = t12, 8)
-EK(.ex_handler, (p[D]) st8 [dst1] = t14, 8)
- ;;
-EX(.ex_handler, (p[C]) ld8 t1 = [src0], 8)
-EK(.ex_handler, (p[C]) ld8 t3 = [src1], 8)
-EX(.ex_handler, (p[D]) st8 [dst0] = t13, 4*8)
-EK(.ex_handler, (p[D]) st8 [dst1] = t15, 4*8)
- br.ctop.sptk .line_copy
- ;;
-
- add dst0=-8,dst0
- add src0=-8,src0
- mov in2=tmp
- .restore sp
- br.sptk.many .medium_copy
- ;;
-
-#define BLOCK_SIZE 128*32
-#define blocksize r23
-#define curlen r24
-
-// dest is on an 8-byte boundary, src is not. We need to do
-// ld8-ld8, shrp, then st8. Max 8 bytes copied per cycle.
-.unaligned_src:
- .prologue
- .save ar.pfs, saved_pfs
- alloc saved_pfs=ar.pfs,3,5,0,8
- .save ar.lc, saved_lc
- mov saved_lc=ar.lc
- .save pr, saved_pr
- mov saved_pr=pr
- .body
-.4k_block:
- mov saved_in0=dst0 // need to save all input arguments
- mov saved_in2=in2
- mov blocksize=BLOCK_SIZE
- ;;
- cmp.lt p6,p7=blocksize,in2
- mov saved_in1=src0
- ;;
-(p6) mov in2=blocksize
- ;;
- shr.u r21=in2,7 // number of cache lines
- shr.u r22=in2,4 // number of 16-byte iteration
- and curlen=15,in2 // copy length after iteration
- and r30=7,src0 // source alignment
- ;;
- cmp.lt p7,p8=1,r21
- add cnt=-1,r21
- ;;
-
- add src_pre_mem=0,src0 // prefetch src pointer
- add dst_pre_mem=0,dst0 // prefetch dest pointer
- and src0=-8,src0 // 1st src pointer
-(p7) mov ar.lc = cnt
-(p8) mov ar.lc = r0
- ;;
- TEXT_ALIGN(32)
-1: lfetch.fault [src_pre_mem], 128
- lfetch.fault.excl [dst_pre_mem], 128
- br.cloop.dptk.few 1b
- ;;
-
- shladd dst1=r22,3,dst0 // 2nd dest pointer
- shladd src1=r22,3,src0 // 2nd src pointer
- cmp.eq p8,p9=r22,r0 // do we really need to loop?
- cmp.le p6,p7=8,curlen; // have at least 8 byte remaining?
- add cnt=-1,r22 // ctop iteration adjustment
- ;;
-EX(.ex_handler, (p9) ld8 r33=[src0],8) // loop primer
-EK(.ex_handler, (p9) ld8 r37=[src1],8)
-(p8) br.dpnt.few .noloop
- ;;
-
-// The jump address is calculated based on src alignment. The COPYU
-// macro below needs to confine its size to a power of two, so an entry
-// can be calculated using shl instead of an expensive multiply. The
-// size is then hard coded by the following #define to match the
-// actual size. This makes it somewhat tedious when the COPYU macro gets
-// changed and this needs to be adjusted to match.
-#define LOOP_SIZE 6
-1:
- mov r29=ip // jmp_table thread
- mov ar.lc=cnt
- ;;
- add r29=.jump_table - 1b - (.jmp1-.jump_table), r29
- shl r28=r30, LOOP_SIZE // jmp_table thread
- mov ar.ec=2 // loop setup
- ;;
- add r29=r29,r28 // jmp_table thread
- cmp.eq p16,p17=r0,r0
- ;;
- mov b6=r29 // jmp_table thread
- ;;
- br.cond.sptk.few b6
-
-// for 8-15 byte case
-// We will skip the loop, but need to replicate the side effect
-// that the loop produces.
-.noloop:
-EX(.ex_handler, (p6) ld8 r37=[src1],8)
- add src0=8,src0
-(p6) shl r25=r30,3
- ;;
-EX(.ex_handler, (p6) ld8 r27=[src1])
-(p6) shr.u r28=r37,r25
-(p6) sub r26=64,r25
- ;;
-(p6) shl r27=r27,r26
- ;;
-(p6) or r21=r28,r27
-
-.unaligned_src_tail:
-/* check if we have more than blocksize to copy; if so, go back */
- cmp.gt p8,p0=saved_in2,blocksize
- ;;
-(p8) add dst0=saved_in0,blocksize
-(p8) add src0=saved_in1,blocksize
-(p8) sub in2=saved_in2,blocksize
-(p8) br.dpnt .4k_block
- ;;
-
-/* We have up to 15 bytes to copy in the tail.
- * Part of the work is already done in the jump table code;
- * we are in the following state:
- * src side:
- *
- * xxxxxx xx <----- r21 has xxxxxxxx already
- * -------- -------- --------
- * 0 8 16
- * ^
- * |
- * src1
- *
- * dst
- * -------- -------- --------
- * ^
- * |
- * dst1
- */
-EX(.ex_handler, (p6) st8 [dst1]=r21,8) // more than 8 byte to copy
-(p6) add curlen=-8,curlen // update length
- mov ar.pfs=saved_pfs
- ;;
- mov ar.lc=saved_lc
- mov pr=saved_pr,-1
- mov in2=curlen // remaining length
- mov dst0=dst1 // dest pointer
- add src0=src1,r30 // forward by src alignment
- ;;
-
-// 7 bytes or fewer.
-.memcpy_short:
- cmp.le p8,p9 = 1,in2
- cmp.le p10,p11 = 2,in2
- cmp.le p12,p13 = 3,in2
- cmp.le p14,p15 = 4,in2
- add src1=1,src0 // second src pointer
- add dst1=1,dst0 // second dest pointer
- ;;
-
-EX(.ex_handler_short, (p8) ld1 t1=[src0],2)
-EK(.ex_handler_short, (p10) ld1 t2=[src1],2)
-(p9) br.ret.dpnt rp // 0 byte copy
- ;;
-
-EX(.ex_handler_short, (p8) st1 [dst0]=t1,2)
-EK(.ex_handler_short, (p10) st1 [dst1]=t2,2)
-(p11) br.ret.dpnt rp // 1 byte copy
-
-EX(.ex_handler_short, (p12) ld1 t3=[src0],2)
-EK(.ex_handler_short, (p14) ld1 t4=[src1],2)
-(p13) br.ret.dpnt rp // 2 byte copy
- ;;
-
- cmp.le p6,p7 = 5,in2
- cmp.le p8,p9 = 6,in2
- cmp.le p10,p11 = 7,in2
-
-EX(.ex_handler_short, (p12) st1 [dst0]=t3,2)
-EK(.ex_handler_short, (p14) st1 [dst1]=t4,2)
-(p15) br.ret.dpnt rp // 3 byte copy
- ;;
-
-EX(.ex_handler_short, (p6) ld1 t5=[src0],2)
-EK(.ex_handler_short, (p8) ld1 t6=[src1],2)
-(p7) br.ret.dpnt rp // 4 byte copy
- ;;
-
-EX(.ex_handler_short, (p6) st1 [dst0]=t5,2)
-EK(.ex_handler_short, (p8) st1 [dst1]=t6,2)
-(p9) br.ret.dptk rp // 5 byte copy
-
-EX(.ex_handler_short, (p10) ld1 t7=[src0],2)
-(p11) br.ret.dptk rp // 6 byte copy
- ;;
-
-EX(.ex_handler_short, (p10) st1 [dst0]=t7,2)
- br.ret.dptk rp // done all cases
-
-
-/* Align dest to nearest 8-byte boundary. We know we have at
- * least 7 bytes to copy, enough to crawl to 8-byte boundary.
- * The actual number of bytes to crawl depends on the dest alignment;
- * 7 bytes or less is handled at .memcpy_short.
-
- * src0 - source even index
- * src1 - source odd index
- * dst0 - dest even index
- * dst1 - dest odd index
- * r30 - distance to 8-byte boundary
- */
-
-.align_dest:
- add src1=1,in1 // source odd index
- cmp.le p7,p0 = 2,r30 // for .align_dest
- cmp.le p8,p0 = 3,r30 // for .align_dest
-EX(.ex_handler_short, (p6) ld1 t1=[src0],2)
- cmp.le p9,p0 = 4,r30 // for .align_dest
- cmp.le p10,p0 = 5,r30
- ;;
-EX(.ex_handler_short, (p7) ld1 t2=[src1],2)
-EK(.ex_handler_short, (p8) ld1 t3=[src0],2)
- cmp.le p11,p0 = 6,r30
-EX(.ex_handler_short, (p6) st1 [dst0] = t1,2)
- cmp.le p12,p0 = 7,r30
- ;;
-EX(.ex_handler_short, (p9) ld1 t4=[src1],2)
-EK(.ex_handler_short, (p10) ld1 t5=[src0],2)
-EX(.ex_handler_short, (p7) st1 [dst1] = t2,2)
-EK(.ex_handler_short, (p8) st1 [dst0] = t3,2)
- ;;
-EX(.ex_handler_short, (p11) ld1 t6=[src1],2)
-EK(.ex_handler_short, (p12) ld1 t7=[src0],2)
- cmp.eq p6,p7=r28,r29
-EX(.ex_handler_short, (p9) st1 [dst1] = t4,2)
-EK(.ex_handler_short, (p10) st1 [dst0] = t5,2)
- sub in2=in2,r30
- ;;
-EX(.ex_handler_short, (p11) st1 [dst1] = t6,2)
-EK(.ex_handler_short, (p12) st1 [dst0] = t7)
- add dst0=in0,r30 // setup arguments
- add src0=in1,r30
-(p6) br.cond.dptk .aligned_src
-(p7) br.cond.dpnt .unaligned_src
- ;;
-
-/* main loop body in jump table format */
-#define COPYU(shift) \
-1: \
-EX(.ex_handler, (p16) ld8 r32=[src0],8); /* 1 */ \
-EK(.ex_handler, (p16) ld8 r36=[src1],8); \
- (p17) shrp r35=r33,r34,shift;; /* 1 */ \
-EX(.ex_handler, (p6) ld8 r22=[src1]); /* common, prime for tail section */ \
- nop.m 0; \
- (p16) shrp r38=r36,r37,shift; \
-EX(.ex_handler, (p17) st8 [dst0]=r35,8); /* 1 */ \
-EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
- br.ctop.dptk.few 1b;; \
- (p7) add src1=-8,src1; /* back out for <8 byte case */ \
- shrp r21=r22,r38,shift; /* speculative work */ \
- br.sptk.few .unaligned_src_tail /* branch out of jump table */ \
- ;;
- TEXT_ALIGN(32)
-.jump_table:
- COPYU(8) // unaligned cases
-.jmp1:
- COPYU(16)
- COPYU(24)
- COPYU(32)
- COPYU(40)
- COPYU(48)
- COPYU(56)
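
The COPYU entries implement the ld8-ld8/shrp/st8 scheme described at .unaligned_src. In C, the per-word merge step looks roughly like this sketch (shift is the source misalignment in bits, 8..56, matching the table entries):

    #include <stddef.h>
    #include <stdint.h>

    /* One aligned 8-byte store per iteration, built from the tails of
     * two adjacent aligned loads: the C shape of shrp. Little-endian,
     * as on ia64. Sketch only; the asm pipelines this with rotation. */
    static void copy_shift_merge(uint64_t *dst, const uint64_t *src_aligned,
                                 size_t nwords, unsigned shift /* 8..56 */)
    {
        uint64_t lo = src_aligned[0];

        for (size_t i = 0; i < nwords; i++) {
            uint64_t hi = src_aligned[i + 1];

            dst[i] = (lo >> shift) | (hi << (64 - shift));
            lo = hi;  /* reuse the load: one new ld8 per st8 */
        }
    }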
-
-#undef A
-#undef B
-#undef C
-#undef D
-
-/*
- * Due to the lack of local tag support in the gcc 2.x assembler, it is not
- * clear which instruction in a bundle faulted. The exception algorithm is:
- * first figure out the faulting address, then check whether any progress
- * was made on the copy; if so, redo the copy from the last known copied
- * location up to (but excluding) the faulting address. In the
- * copy_from_user case, the remaining bytes in the kernel buffer are zeroed.
- *
- * Take copy_from_user as an example: a bundle contains multiple loads, and
- * those loads can span two pages, so the faulting address is calculated as
- * page_round_down(max(src0, src1)). This relies on the fact that if we can
- * access one byte in a page, we can access any byte in that page.
- *
- * predicates used in the exception handler:
- * p6-p7: direction
- * p10-p11: src faulting addr calculation
- * p12-p13: dst faulting addr calculation
- */
-
-#define A r19
-#define B r20
-#define C r21
-#define D r22
-#define F r28
-
-#define memset_arg0 r32
-#define memset_arg2 r33
-
-#define saved_retval loc0
-#define saved_rtlink loc1
-#define saved_pfs_stack loc2
-
-.ex_hndlr_s:
- add src0=8,src0
- br.sptk .ex_handler
- ;;
-.ex_hndlr_d:
- add dst0=8,dst0
- br.sptk .ex_handler
- ;;
-.ex_hndlr_lcpy_1:
- mov src1=src_pre_mem
- mov dst1=dst_pre_mem
- cmp.gtu p10,p11=src_pre_mem,saved_in1
- cmp.gtu p12,p13=dst_pre_mem,saved_in0
- ;;
-(p10) add src0=8,saved_in1
-(p11) mov src0=saved_in1
-(p12) add dst0=8,saved_in0
-(p13) mov dst0=saved_in0
- br.sptk .ex_handler
-.ex_handler_lcpy:
- // In the line_copy block, the preload addresses should always be ahead
- // of the other two src/dst pointers. Furthermore, src1/dst1 should
- // always be ahead of src0/dst0.
- mov src1=src_pre_mem
- mov dst1=dst_pre_mem
-.ex_handler:
- mov pr=saved_pr,-1 // first restore pr, lc, and pfs
- mov ar.lc=saved_lc
- mov ar.pfs=saved_pfs
- ;;
-.ex_handler_short: // faults in these sections didn't change pr, lc, or pfs
- cmp.ltu p6,p7=saved_in0, saved_in1 // get the copy direction
- cmp.ltu p10,p11=src0,src1
- cmp.ltu p12,p13=dst0,dst1
- fcmp.eq p8,p0=f6,f0 // is it memcpy?
- mov tmp = dst0
- ;;
-(p11) mov src1 = src0 // pick the larger of the two
-(p13) mov dst0 = dst1 // make dst0 the smaller one
-(p13) mov dst1 = tmp // and dst1 the larger one
- ;;
-(p6) dep F = r0,dst1,0,PAGE_SHIFT // usr dst round down to page boundary
-(p7) dep F = r0,src1,0,PAGE_SHIFT // usr src round down to page boundary
- ;;
-(p6) cmp.le p14,p0=dst0,saved_in0 // no progress has been made on store
-(p7) cmp.le p14,p0=src0,saved_in1 // no progress has been made on load
- mov retval=saved_in2
-(p8) ld1 tmp=[src1] // force an oops for memcpy call
-(p8) st1 [dst1]=r0 // force an oops for memcpy call
-(p14) br.ret.sptk.many rp
-
-/*
- * The remaining bytes to copy are calculated as:
- *
- * A = (faulting_addr - orig_src) -> len to faulting ld address
- * or
- * (faulting_addr - orig_dst) -> len to faulting st address
- * B = (cur_dst - orig_dst) -> len copied so far
- * C = A - B -> len still to be copied
- * D = orig_len - A -> len to be zeroed
- */
-(p6) sub A = F, saved_in0
-(p7) sub A = F, saved_in1
- clrrrb
- ;;
- alloc saved_pfs_stack=ar.pfs,3,3,3,0
- sub B = dst0, saved_in0 // how many bytes copied so far
- ;;
- sub C = A, B
- sub D = saved_in2, A
- ;;
- cmp.gt p8,p0=C,r0 // any bytes left to copy?
- add memset_arg0=saved_in0, A
-(p6) mov memset_arg2=0 // copy_to_user should not call memset
-(p7) mov memset_arg2=D // copy_from_user needs to have kbuf zeroed
- mov r8=0
- mov saved_retval = D
- mov saved_rtlink = b0
-
- add out0=saved_in0, B
- add out1=saved_in1, B
- mov out2=C
-(p8) br.call.sptk.few b0=__copy_user // recursive call
- ;;
-
- add saved_retval=saved_retval,r8 // above might return non-zero value
- cmp.gt p8,p0=memset_arg2,r0 // any bytes left to zero?
- mov out0=memset_arg0 // *s
- mov out1=r0 // c
- mov out2=memset_arg2 // n
-(p8) br.call.sptk.few b0=memset
- ;;
-
- mov retval=saved_retval
- mov ar.pfs=saved_pfs_stack
- mov b0=saved_rtlink
- br.ret.sptk.many rp
-
-/* end of McKinley specific optimization */
-END(__copy_user)
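
The recovery path's byte accounting (the A/B/C/D comment above) restated in C, as a sketch with hypothetical names; fault_on_load corresponds to predicate p7, and F is the page-rounded faulting address:

    /* Restates: A = fault - orig, B = copied so far, C = A - B still
     * copyable, D = orig_len - A to be zeroed (copy_from_user only). */
    struct copy_fixup {
        unsigned long redo_src, redo_dst, redo_len;  /* C */
        unsigned long zero_dst, zero_len;            /* D */
    };

    static struct copy_fixup
    fixup_after_fault(unsigned long F, unsigned long orig_src,
                      unsigned long orig_dst, unsigned long cur_dst,
                      unsigned long orig_len, int fault_on_load)
    {
        unsigned long A = F - (fault_on_load ? orig_src : orig_dst);
        unsigned long B = cur_dst - orig_dst;  /* already copied */

        return (struct copy_fixup){
            .redo_src = orig_src + B,
            .redo_dst = orig_dst + B,
            .redo_len = A - B,
            .zero_dst = orig_dst + A,
            .zero_len = orig_len - A,
        };
    }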
diff --git a/xen/arch/ia64/linux/memset.S b/xen/arch/ia64/linux/memset.S
deleted file mode 100644
index f26c16aefb..0000000000
--- a/xen/arch/ia64/linux/memset.S
+++ /dev/null
@@ -1,362 +0,0 @@
-/* Optimized version of the standard memset() function.
-
- Copyright (c) 2002 Hewlett-Packard Co/CERN
- Sverre Jarp <Sverre.Jarp@cern.ch>
-
- Return: dest
-
- Inputs:
- in0: dest
- in1: value
- in2: count
-
-   The algorithm is fairly straightforward: set byte by byte until we
-   get to a 16B-aligned address, then loop on 128B chunks using an
-   early store as prefetching, then loop on 32B chunks, then clear remaining
-   words, and finally clear remaining bytes.
-   Since a stf.spill of f0 can store 16B in one go, we use this instruction
-   to get peak speed when value = 0. */
-
-#include <asm/asmmacro.h>
-#undef ret
-
-#define dest in0
-#define value in1
-#define cnt in2
-
-#define tmp r31
-#define save_lc r30
-#define ptr0 r29
-#define ptr1 r28
-#define ptr2 r27
-#define ptr3 r26
-#define ptr9 r24
-#define loopcnt r23
-#define linecnt r22
-#define bytecnt r21
-
-#define fvalue f6
-
-// This routine uses only scratch predicate registers (p6 - p15)
-#define p_scr p6 // default register for same-cycle branches
-#define p_nz p7
-#define p_zr p8
-#define p_unalgn p9
-#define p_y p11
-#define p_n p12
-#define p_yy p13
-#define p_nn p14
-
-#define MIN1 15
-#define MIN1P1HALF 8
-#define LINE_SIZE 128
-#define LSIZE_SH 7 // shift amount
-#define PREF_AHEAD 8
-
-GLOBAL_ENTRY(memset)
-{ .mmi
- .prologue
- alloc tmp = ar.pfs, 3, 0, 0, 0
- lfetch.nt1 [dest] //
- .save ar.lc, save_lc
- mov.i save_lc = ar.lc
- .body
-} { .mmi
- mov ret0 = dest // return value
- cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero
- cmp.eq p_scr, p0 = cnt, r0
-;; }
-{ .mmi
- and ptr2 = -(MIN1+1), dest // aligned address
- and tmp = MIN1, dest // prepare to check for correct alignment
- tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? (M_B_U)
-} { .mib
- mov ptr1 = dest
- mux1 value = value, @brcst // create 8 identical bytes in word
-(p_scr) br.ret.dpnt.many rp // return immediately if count = 0
-;; }
-{ .mib
- cmp.ne p_unalgn, p0 = tmp, r0 //
-} { .mib
- sub bytecnt = (MIN1+1), tmp // NB: # of bytes to move is 1 higher than loopcnt
- cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
-(p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U)
-;; }
-{ .mmi
-(p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment
-(p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ?
-;; }
-{ .mib
-(p_y) add cnt = -8, cnt //
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ?
-} { .mib
-(p_y) st8 [ptr2] = value,-4 //
-(p_n) add ptr2 = 4, ptr2 //
-;; }
-{ .mib
-(p_yy) add cnt = -4, cnt //
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ?
-} { .mib
-(p_yy) st4 [ptr2] = value,-2 //
-(p_nn) add ptr2 = 2, ptr2 //
-;; }
-{ .mmi
- mov tmp = LINE_SIZE+1 // for compare
-(p_y) add cnt = -2, cnt //
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ?
-} { .mmi
- setf.sig fvalue=value // transfer value to FLP side
-(p_y) st2 [ptr2] = value,-1 //
-(p_n) add ptr2 = 1, ptr2 //
-;; }
-
-{ .mmi
-(p_yy) st1 [ptr2] = value //
- cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
-} { .mbb
-(p_yy) add cnt = -1, cnt //
-(p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few
-;; }
-
-{ .mib
- nop.m 0
- shr.u linecnt = cnt, LSIZE_SH
-(p_zr) br.cond.dptk.many .l1b // Jump to use stf.spill
-;; }
-
- TEXT_ALIGN(32) // --------------------- // L1A: store ahead into cache lines; fill later
-{ .mmi
- and tmp = -(LINE_SIZE), cnt // compute end of range
- mov ptr9 = ptr1 // used for prefetching
- and cnt = (LINE_SIZE-1), cnt // remainder
-} { .mmi
- mov loopcnt = PREF_AHEAD-1 // default prefetch loop
- cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
-;; }
-{ .mmi
-(p_scr) add loopcnt = -1, linecnt //
- add ptr2 = 8, ptr1 // start of stores (beyond prefetch stores)
- add ptr1 = tmp, ptr1 // first address beyond total range
-;; }
-{ .mmi
- add tmp = -1, linecnt // next loop count
- mov.i ar.lc = loopcnt //
-;; }
-.pref_l1a:
-{ .mib
- stf8 [ptr9] = fvalue, 128 // Do stores one cache line apart
- nop.i 0
- br.cloop.dptk.few .pref_l1a
-;; }
-{ .mmi
- add ptr0 = 16, ptr2 // Two stores in parallel
- mov.i ar.lc = tmp //
-;; }
-.l1ax:
- { .mmi
- stf8 [ptr2] = fvalue, 8
- stf8 [ptr0] = fvalue, 8
- ;; }
- { .mmi
- stf8 [ptr2] = fvalue, 24
- stf8 [ptr0] = fvalue, 24
- ;; }
- { .mmi
- stf8 [ptr2] = fvalue, 8
- stf8 [ptr0] = fvalue, 8
- ;; }
- { .mmi
- stf8 [ptr2] = fvalue, 24
- stf8 [ptr0] = fvalue, 24
- ;; }
- { .mmi
- stf8 [ptr2] = fvalue, 8
- stf8 [ptr0] = fvalue, 8
- ;; }
- { .mmi
- stf8 [ptr2] = fvalue, 24
- stf8 [ptr0] = fvalue, 24
- ;; }
- { .mmi
- stf8 [ptr2] = fvalue, 8
- stf8 [ptr0] = fvalue, 32
- cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
- ;; }
-{ .mmb
- stf8 [ptr2] = fvalue, 24
-(p_scr) stf8 [ptr9] = fvalue, 128
- br.cloop.dptk.few .l1ax
-;; }
-{ .mbb
- cmp.le p_scr, p0 = 8, cnt // just a few bytes left ?
-(p_scr) br.cond.dpnt.many .fraction_of_line // Branch no. 2
- br.cond.dpnt.many .move_bytes_from_alignment // Branch no. 3
-;; }
-
- TEXT_ALIGN(32)
-.l1b: // ------------------------------------ // L1B: store ahead into cache lines; fill later
-{ .mmi
- and tmp = -(LINE_SIZE), cnt // compute end of range
- mov ptr9 = ptr1 // used for prefetching
- and cnt = (LINE_SIZE-1), cnt // remainder
-} { .mmi
- mov loopcnt = PREF_AHEAD-1 // default prefetch loop
- cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
-;; }
-{ .mmi
-(p_scr) add loopcnt = -1, linecnt
- add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores)
- add ptr1 = tmp, ptr1 // first address beyond total range
-;; }
-{ .mmi
- add tmp = -1, linecnt // next loop count
- mov.i ar.lc = loopcnt
-;; }
-.pref_l1b:
-{ .mib
- stf.spill [ptr9] = f0, 128 // Do stores one cache line apart
- nop.i 0
- br.cloop.dptk.few .pref_l1b
-;; }
-{ .mmi
- add ptr0 = 16, ptr2 // Two stores in parallel
- mov.i ar.lc = tmp
-;; }
-.l1bx:
- { .mmi
- stf.spill [ptr2] = f0, 32
- stf.spill [ptr0] = f0, 32
- ;; }
- { .mmi
- stf.spill [ptr2] = f0, 32
- stf.spill [ptr0] = f0, 32
- ;; }
- { .mmi
- stf.spill [ptr2] = f0, 32
- stf.spill [ptr0] = f0, 64
- cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
- ;; }
-{ .mmb
- stf.spill [ptr2] = f0, 32
-(p_scr) stf.spill [ptr9] = f0, 128
- br.cloop.dptk.few .l1bx
-;; }
-{ .mib
- cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
-(p_scr) br.cond.dpnt.many .move_bytes_from_alignment //
-;; }
-
-.fraction_of_line:
-{ .mib
- add ptr2 = 16, ptr1
- shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32
-;; }
-{ .mib
- cmp.eq p_scr, p0 = loopcnt, r0
- add loopcnt = -1, loopcnt
-(p_scr) br.cond.dpnt.many .store_words
-;; }
-{ .mib
- and cnt = 0x1f, cnt // compute the remaining cnt
- mov.i ar.lc = loopcnt
-;; }
- TEXT_ALIGN(32)
-.l2: // ------------------------------------ // L2A: store 32B in 2 cycles
-{ .mmb
- stf8 [ptr1] = fvalue, 8
- stf8 [ptr2] = fvalue, 8
-;; } { .mmb
- stf8 [ptr1] = fvalue, 24
- stf8 [ptr2] = fvalue, 24
- br.cloop.dptk.many .l2
-;; }
-.store_words:
-{ .mib
- cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
-(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch
-;; }
-
-{ .mmi
- stf8 [ptr1] = fvalue, 8 // store
- cmp.le p_y, p_n = 16, cnt
- add cnt = -8, cnt // subtract
-;; }
-{ .mmi
-(p_y) stf8 [ptr1] = fvalue, 8 // store
-(p_y) cmp.le.unc p_yy, p_nn = 16, cnt
-(p_y) add cnt = -8, cnt // subtract
-;; }
-{ .mmi // store
-(p_yy) stf8 [ptr1] = fvalue, 8
-(p_yy) add cnt = -8, cnt // subtract
-;; }
-
-.move_bytes_from_alignment:
-{ .mib
- cmp.eq p_scr, p0 = cnt, r0
- tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ?
-(p_scr) br.cond.dpnt.few .restore_and_exit
-;; }
-{ .mib
-(p_y) st4 [ptr1] = value,4
- tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ?
-;; }
-{ .mib
-(p_yy) st2 [ptr1] = value,2
- tbit.nz.unc p_y, p0 = cnt, 0 // should we terminate with a st1 ?
-;; }
-
-{ .mib
-(p_y) st1 [ptr1] = value
-;; }
-.restore_and_exit:
-{ .mib
- nop.m 0
- mov.i ar.lc = save_lc
- br.ret.sptk.many rp
-;; }
-
-.move_bytes_unaligned:
-{ .mmi
- .pred.rel "mutex",p_y, p_n
- .pred.rel "mutex",p_yy, p_nn
-(p_n) cmp.le p_yy, p_nn = 4, cnt
-(p_y) cmp.le p_yy, p_nn = 5, cnt
-(p_n) add ptr2 = 2, ptr1
-} { .mmi
-(p_y) add ptr2 = 3, ptr1
-(p_y) st1 [ptr1] = value, 1 // fill 1 (odd-aligned) byte [15, 14 (or less) left]
-(p_y) add cnt = -1, cnt
-;; }
-{ .mmi
-(p_yy) cmp.le.unc p_y, p0 = 8, cnt
- add ptr3 = ptr1, cnt // prepare last store
- mov.i ar.lc = save_lc
-} { .mmi
-(p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
-(p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [11, 10 (or less) left]
-(p_yy) add cnt = -4, cnt
-;; }
-{ .mmi
-(p_y) cmp.le.unc p_yy, p0 = 8, cnt
- add ptr3 = -1, ptr3 // last store
- tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ?
-} { .mmi
-(p_y) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
-(p_y) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [7, 6 (or less) left]
-(p_y) add cnt = -4, cnt
-;; }
-{ .mmi
-(p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
-(p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [3, 2 (or less) left]
- tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ?
-} { .mmi
-(p_yy) add cnt = -4, cnt
-;; }
-{ .mmb
-(p_scr) st2 [ptr1] = value // fill 2 (aligned) bytes
-(p_y) st1 [ptr3] = value // fill last byte (using ptr3)
- br.ret.sptk.many rp
-}
-END(memset)
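
The staging the header comment describes, reduced to scalar C (a sketch; the real routine additionally prefetches a cache line ahead and switches to stf.spill when value is zero):

    #include <stddef.h>
    #include <stdint.h>

    /* Byte-crawl to 16B alignment, then 128B lines, then 32B chunks,
     * then words, then trailing bytes. Aliasing casts are sketch-level. */
    static void *memset_staged(void *dest, int value, size_t cnt)
    {
        uint8_t *p = dest;
        uint64_t v = 0x0101010101010101ull * (uint8_t)value; /* mux1 @brcst */

        while (cnt && ((uintptr_t)p & 15)) {       /* crawl to 16B */
            *p++ = (uint8_t)value;
            cnt--;
        }
        for (; cnt >= 128; cnt -= 128, p += 128)   /* line-sized chunks */
            for (int i = 0; i < 16; i++)
                ((uint64_t *)(void *)p)[i] = v;
        for (; cnt >= 32; cnt -= 32, p += 32)      /* 32B chunks */
            for (int i = 0; i < 4; i++)
                ((uint64_t *)(void *)p)[i] = v;
        for (; cnt >= 8; cnt -= 8, p += 8)         /* remaining words */
            *(uint64_t *)(void *)p = v;
        while (cnt--)                              /* remaining bytes */
            *p++ = (uint8_t)value;
        return dest;
    }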
diff --git a/xen/arch/ia64/linux/numa.c b/xen/arch/ia64/linux/numa.c
deleted file mode 100644
index 77118bbf3d..0000000000
--- a/xen/arch/ia64/linux/numa.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * This file contains NUMA specific variables and functions which can
- * be split away from DISCONTIGMEM and are used on NUMA machines with
- * contiguous memory.
- *
- * 2002/08/07 Erich Focht <efocht@ess.nec.de>
- */
-
-#include <linux/config.h>
-#include <linux/cpu.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/node.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <asm/mmzone.h>
-#include <asm/numa.h>
-
-
-/*
- * The following structures are usually initialized by ACPI or
- * similar mechanisms and describe the NUMA characteristics of the machine.
- */
-int num_node_memblks;
-struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
-struct node_cpuid_s node_cpuid[NR_CPUS];
-/*
- * This is a matrix with "distances" between nodes, they should be
- * proportional to the memory access latency ratios.
- */
-u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
-
-/* Identify which cnode a physical address resides on */
-int
-paddr_to_nid(unsigned long paddr)
-{
- int i;
-
- for (i = 0; i < num_node_memblks; i++)
- if (paddr >= node_memblk[i].start_paddr &&
- paddr < node_memblk[i].start_paddr + node_memblk[i].size)
- break;
-
- return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
-}
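
A short usage sketch for the lookup above; note the asymmetric fallback: -1 only once memblks are registered, 0 (node zero) on a machine with none:

    #include <asm/numa.h>  /* paddr_to_nid(), per the deleted file */

    /* Map a physical address to a node, falling back to node 0 when no
     * memblk covers it (assumed policy, for illustration only). */
    static int node_of(unsigned long paddr)
    {
        int nid = paddr_to_nid(paddr);

        return (nid < 0) ? 0 : nid;
    }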
diff --git a/xen/arch/ia64/linux/pal.S b/xen/arch/ia64/linux/pal.S
deleted file mode 100644
index 0b533441c3..0000000000
--- a/xen/arch/ia64/linux/pal.S
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * PAL Firmware support
- * IA-64 Processor Programmers Reference Vol 2
- *
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co
- * David Mosberger <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * 05/22/2000 eranian Added support for stacked register calls
- * 05/24/2000 eranian Added support for physical mode static calls
- */
-
-#include <asm/asmmacro.h>
-#include <asm/processor.h>
-
- .data
-pal_entry_point:
- data8 ia64_pal_default_handler
- .text
-
-/*
- * Set the PAL entry point address. This could be written in C code, but we
- * do it here to keep it all in one module (besides, it's so trivial that it's
- * not a big deal).
- *
- * in0 Address of the PAL entry point (text address, NOT a function
- * descriptor).
- */
-GLOBAL_ENTRY(ia64_pal_handler_init)
- alloc r3=ar.pfs,1,0,0,0
- movl r2=pal_entry_point
- ;;
- st8 [r2]=in0
- br.ret.sptk.many rp
-END(ia64_pal_handler_init)
-
-/*
- * Default PAL call handler. This needs to be coded in assembly because it
- * uses the static calling convention, i.e., the RSE may not be used and
- * calls are done via "br.cond" (not "br.call").
- */
-GLOBAL_ENTRY(ia64_pal_default_handler)
- mov r8=-1
- br.cond.sptk.many rp
-END(ia64_pal_default_handler)
-
-/*
- * Make a PAL call using the static calling convention.
- *
- * in0 Index of PAL service
- * in1 - in3 Remaining PAL arguments
- */
-GLOBAL_ENTRY(ia64_pal_call_static)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
- alloc loc1 = ar.pfs,4,5,0,0
- movl loc2 = pal_entry_point
-1: {
- mov r28 = in0
- mov r29 = in1
- mov r8 = ip
- }
- ;;
- ld8 loc2 = [loc2] // loc2 <- entry point
- adds r8 = 1f-1b,r8
- mov loc4=ar.rsc // save RSE configuration
- ;;
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- mov loc3 = psr
- mov loc0 = rp
- .body
- mov r30 = in2
-
- mov r31 = in3
- mov b7 = loc2
-
- rsm psr.i
- ;;
- mov rp = r8
- br.cond.sptk.many b7
-1: mov psr.l = loc3
- mov ar.rsc = loc4 // restore RSE configuration
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- srlz.d // serialize restoration of psr.l
- br.ret.sptk.many b0
-END(ia64_pal_call_static)
-
-/*
- * Make a PAL call using the stacked registers calling convention.
- *
- * Inputs:
- * in0 Index of PAL service
- * in1 - in3 Remaining PAL arguments
- */
-GLOBAL_ENTRY(ia64_pal_call_stacked)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
- alloc loc1 = ar.pfs,4,4,4,0
- movl loc2 = pal_entry_point
-
- mov r28 = in0 // Index MUST be copied to r28
- mov out0 = in0 // AND in0 of PAL function
- mov loc0 = rp
- .body
- ;;
- ld8 loc2 = [loc2] // loc2 <- entry point
- mov out1 = in1
- mov out2 = in2
- mov out3 = in3
- mov loc3 = psr
- ;;
- rsm psr.i
- mov b7 = loc2
- ;;
- br.call.sptk.many rp=b7 // now make the call
-.ret0: mov psr.l = loc3
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- srlz.d // serialize restoration of psr.l
- br.ret.sptk.many b0
-END(ia64_pal_call_stacked)
-
-/*
- * Make a physical mode PAL call using the static registers calling convention.
- *
- * Inputs:
- * in0 Index of PAL service
- * in1 - in3 Remaining PAL arguments
- *
- * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
- * So we don't need to clear them.
- */
-#define PAL_PSR_BITS_TO_CLEAR \
- (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT |\
- IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
- IA64_PSR_DFL | IA64_PSR_DFH)
-
-#define PAL_PSR_BITS_TO_SET \
- (IA64_PSR_BN)
-
-
-GLOBAL_ENTRY(ia64_pal_call_phys_static)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
- alloc loc1 = ar.pfs,4,7,0,0
- movl loc2 = pal_entry_point
-1: {
- mov r28 = in0 // copy procedure index
- mov r8 = ip // save ip to compute branch
- mov loc0 = rp // save rp
- }
- .body
- ;;
- ld8 loc2 = [loc2] // loc2 <- entry point
- mov r29 = in1 // first argument
- mov r30 = in2 // copy arg2
- mov r31 = in3 // copy arg3
- ;;
- mov loc3 = psr // save psr
- adds r8 = 1f-1b,r8 // calculate return address for call
- ;;
- mov loc4=ar.rsc // save RSE configuration
- dep.z loc2=loc2,0,61 // convert pal entry point to physical
- tpa r8=r8 // convert rp to physical
- ;;
- mov b7 = loc2 // install target to branch reg
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- movl r16=PAL_PSR_BITS_TO_CLEAR
- movl r17=PAL_PSR_BITS_TO_SET
- ;;
- or loc3=loc3,r17 // add in psr the bits to set
- ;;
- andcm r16=loc3,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode_phys
- mov rp = r8 // install return address (physical)
- mov loc5 = r19
- mov loc6 = r20
- br.cond.sptk.many b7
-1:
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- mov r16=loc3 // r16= original psr
- mov r19=loc5
- mov r20=loc6
- br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
- mov psr.l = loc3 // restore init PSR
-
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- mov ar.rsc=loc4 // restore RSE configuration
- srlz.d // serialize restoration of psr.l
- br.ret.sptk.many b0
-END(ia64_pal_call_phys_static)
-
-/*
- * Make a PAL call using the stacked registers in physical mode.
- *
- * Inputs:
- * in0 Index of PAL service
- * in1 - in3 Remaining PAL arguments
- */
-GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
- alloc loc1 = ar.pfs,5,7,4,0
- movl loc2 = pal_entry_point
-1: {
- mov r28 = in0 // copy procedure index
- mov loc0 = rp // save rp
- }
- .body
- ;;
- ld8 loc2 = [loc2] // loc2 <- entry point
- mov loc3 = psr // save psr
- ;;
- mov loc4=ar.rsc // save RSE configuration
- dep.z loc2=loc2,0,61 // convert pal entry point to physical
- ;;
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- movl r16=PAL_PSR_BITS_TO_CLEAR
- movl r17=PAL_PSR_BITS_TO_SET
- ;;
- or loc3=loc3,r17 // add in psr the bits to set
- mov b7 = loc2 // install target to branch reg
- ;;
- andcm r16=loc3,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode_phys
-
- mov out0 = in0 // first argument
- mov out1 = in1 // copy arg2
- mov out2 = in2 // copy arg3
- mov out3 = in3 // copy arg4
- mov loc5 = r19
- mov loc6 = r20
-
- br.call.sptk.many rp=b7 // now make the call
-
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- mov r16=loc3 // r16= original psr
- mov r19=loc5
- mov r20=loc6
- br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-
- mov psr.l = loc3 // restore init PSR
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- mov ar.rsc=loc4 // restore RSE configuration
- srlz.d // serialize restoration of psr.l
- br.ret.sptk.many b0
-END(ia64_pal_call_phys_stacked)
-
-/*
- * Save scratch fp scratch regs which aren't saved in pt_regs already
- * (fp10-fp15).
- *
- * NOTE: We need to do this since firmware (SAL and PAL) may use any of the
- * scratch regs fp-low partition.
- *
- * Inputs:
- * in0 Address of stack storage for fp regs
- */
-GLOBAL_ENTRY(ia64_save_scratch_fpregs)
- alloc r3=ar.pfs,1,0,0,0
- add r2=16,in0
- ;;
- stf.spill [in0] = f10,32
- stf.spill [r2] = f11,32
- ;;
- stf.spill [in0] = f12,32
- stf.spill [r2] = f13,32
- ;;
- stf.spill [in0] = f14,32
- stf.spill [r2] = f15,32
- br.ret.sptk.many rp
-END(ia64_save_scratch_fpregs)
-
-/*
- * Load the scratch fp regs (fp10-fp15)
- *
- * Inputs:
- * in0 Address of stack storage for fp regs
- */
-GLOBAL_ENTRY(ia64_load_scratch_fpregs)
- alloc r3=ar.pfs,1,0,0,0
- add r2=16,in0
- ;;
- ldf.fill f10 = [in0],32
- ldf.fill f11 = [r2],32
- ;;
- ldf.fill f12 = [in0],32
- ldf.fill f13 = [r2],32
- ;;
- ldf.fill f14 = [in0],32
- ldf.fill f15 = [r2],32
- br.ret.sptk.many rp
-END(ia64_load_scratch_fpregs)
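
For the static stubs above, the convention is: service index in r28, arguments in r29-r31, results back in r8-r11 with r8 the status. Seen from C, a call reduces to something like this sketch; the prototype and wrapper are assumptions for illustration, not the Xen declarations:

    /* Under the ia64 ABI, r8 lands in the C return value; capturing
     * r9-r11 as well needs the usual retval-struct wrapper. */
    extern long ia64_pal_call_static(unsigned long index, unsigned long a1,
                                     unsigned long a2, unsigned long a3);

    #define PAL_FREQ_BASE 13  /* example PAL service index */

    static long pal_freq_base_status(void)
    {
        return ia64_pal_call_static(PAL_FREQ_BASE, 0, 0, 0);  /* status */
    }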
diff --git a/xen/arch/ia64/linux/pcdp.h b/xen/arch/ia64/linux/pcdp.h
deleted file mode 100644
index ce910d68bd..0000000000
--- a/xen/arch/ia64/linux/pcdp.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Definitions for PCDP-defined console devices
- *
- * v1.0a: http://www.dig64.org/specifications/DIG64_HCDPv10a_01.pdf
- * v2.0: http://www.dig64.org/specifications/DIG64_PCDPv20.pdf
- *
- * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P.
- * Khalid Aziz <khalid.aziz@hp.com>
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define PCDP_CONSOLE 0
-#define PCDP_DEBUG 1
-#define PCDP_CONSOLE_OUTPUT 2
-#define PCDP_CONSOLE_INPUT 3
-
-#define PCDP_UART (0 << 3)
-#define PCDP_VGA (1 << 3)
-#define PCDP_USB (2 << 3)
-
-/* pcdp_uart.type and pcdp_device.type */
-#define PCDP_CONSOLE_UART (PCDP_UART | PCDP_CONSOLE)
-#define PCDP_DEBUG_UART (PCDP_UART | PCDP_DEBUG)
-#define PCDP_CONSOLE_VGA (PCDP_VGA | PCDP_CONSOLE_OUTPUT)
-#define PCDP_CONSOLE_USB (PCDP_USB | PCDP_CONSOLE_INPUT)
-
-/* pcdp_uart.flags */
-#define PCDP_UART_EDGE_SENSITIVE (1 << 0)
-#define PCDP_UART_ACTIVE_LOW (1 << 1)
-#define PCDP_UART_PRIMARY_CONSOLE (1 << 2)
-#define PCDP_UART_IRQ (1 << 6) /* in pci_func for rev < 3 */
-#define PCDP_UART_PCI (1 << 7) /* in pci_func for rev < 3 */
-
-struct pcdp_uart {
- u8 type;
- u8 bits;
- u8 parity;
- u8 stop_bits;
- u8 pci_seg;
- u8 pci_bus;
- u8 pci_dev;
- u8 pci_func;
- u64 baud;
- struct acpi_generic_address addr;
- u16 pci_dev_id;
- u16 pci_vendor_id;
- u32 gsi;
- u32 clock_rate;
- u8 pci_prog_intfc;
- u8 flags;
- u16 conout_index;
- u32 reserved;
-} __attribute__((packed));
-
-#define PCDP_IF_PCI 1
-
-/* pcdp_if_pci.trans */
-#define PCDP_PCI_TRANS_IOPORT 0x02
-#define PCDP_PCI_TRANS_MMIO 0x01
-
-struct pcdp_if_pci {
- u8 interconnect;
- u8 reserved;
- u16 length;
- u8 segment;
- u8 bus;
- u8 dev;
- u8 fun;
- u16 dev_id;
- u16 vendor_id;
- u32 acpi_interrupt;
- u64 mmio_tra;
- u64 ioport_tra;
- u8 flags;
- u8 trans;
-} __attribute__((packed));
-
-struct pcdp_vga {
- u8 count; /* address space descriptors */
-} __attribute__((packed));
-
-/* pcdp_device.flags */
-#define PCDP_PRIMARY_CONSOLE 1
-
-struct pcdp_device {
- u8 type;
- u8 flags;
- u16 length;
- u16 efi_index;
- /* next data is pcdp_if_pci or pcdp_if_acpi (not yet supported) */
- /* next data is device specific type (currently only pcdp_vga) */
-} __attribute__((packed));
-
-struct pcdp {
- u8 signature[4];
- u32 length;
- u8 rev; /* PCDP v2.0 is rev 3 */
- u8 chksum;
- u8 oemid[6];
- u8 oem_tabid[8];
- u32 oem_rev;
- u8 creator_id[4];
- u32 creator_rev;
- u32 num_uarts;
- struct pcdp_uart uart[0]; /* actual size is num_uarts */
- /* remainder of table is pcdp_device structures */
-} __attribute__((packed));
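
Given the layout above (fixed header, num_uarts consecutive pcdp_uart entries, then variable-length pcdp_device records), a console driver would scan it roughly like this sketch (assumes an already-mapped, checksum-verified table and kernel integer types):

    /* Find the primary UART console among the fixed-size uart entries;
     * the type and flag values are exactly those defined above. */
    static struct pcdp_uart *find_primary_uart(struct pcdp *pcdp)
    {
        u32 i;

        for (i = 0; i < pcdp->num_uarts; i++) {
            struct pcdp_uart *uart = &pcdp->uart[i];

            if (uart->type == PCDP_CONSOLE_UART &&
                (uart->flags & PCDP_UART_PRIMARY_CONSOLE))
                return uart;
        }
        return NULL;
    }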
diff --git a/xen/arch/ia64/linux/sn/Makefile b/xen/arch/ia64/linux/sn/Makefile
deleted file mode 100644
index 24687f3086..0000000000
--- a/xen/arch/ia64/linux/sn/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-subdir-y += kernel
-subdir-y += pci
diff --git a/xen/arch/ia64/linux/sn/kernel/Makefile b/xen/arch/ia64/linux/sn/kernel/Makefile
deleted file mode 100644
index 54a9da7ca7..0000000000
--- a/xen/arch/ia64/linux/sn/kernel/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-y += machvec.o
-obj-y += pio_phys.o
-obj-y += ptc_deadlock.o
diff --git a/xen/arch/ia64/linux/sn/kernel/README.origin b/xen/arch/ia64/linux/sn/kernel/README.origin
deleted file mode 100644
index 9f77d132f9..0000000000
--- a/xen/arch/ia64/linux/sn/kernel/README.origin
+++ /dev/null
@@ -1,9 +0,0 @@
-Source files in this directory are identical copies of linux-2.6.19 files:
-
-NOTE: DO NOT commit changes to these files! If a file
-needs to be changed, move it to ../linux-xen and follow
-the instructions in the README there.
-
-machvec.c -> linux/arch/ia64/sn/kernel/machvec.c
-pio_phys.S -> linux/arch/ia64/sn/kernel/pio_phys.S
-ptc_deadlock.S -> linux/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
diff --git a/xen/arch/ia64/linux/sn/kernel/machvec.c b/xen/arch/ia64/linux/sn/kernel/machvec.c
deleted file mode 100644
index 02bb915584..0000000000
--- a/xen/arch/ia64/linux/sn/kernel/machvec.c
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-#define MACHVEC_PLATFORM_NAME sn2
-#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h>
-#include <asm/machvec_init.h>
diff --git a/xen/arch/ia64/linux/sn/kernel/pio_phys.S b/xen/arch/ia64/linux/sn/kernel/pio_phys.S
deleted file mode 100644
index 3c7d48d6ec..0000000000
--- a/xen/arch/ia64/linux/sn/kernel/pio_phys.S
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
- *
- * This file contains routines used to access MMR registers via
- * uncached physical addresses.
- * pio_phys_read_mmr - read an MMR
- * pio_phys_write_mmr - write an MMR
- * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
- * Second MMR will be skipped if address is NULL
- *
- * Addresses passed to these routines should be uncached physical addresses
- * i.e., 0x80000....
- */
-
-
-
-#include <asm/asmmacro.h>
-#include <asm/page.h>
-
-GLOBAL_ENTRY(pio_phys_read_mmr)
- .prologue
- .regstk 1,0,0,0
- .body
- mov r2=psr
- rsm psr.i | psr.dt
- ;;
- srlz.d
- ld8.acq r8=[r32]
- ;;
- mov psr.l=r2;;
- srlz.d
- br.ret.sptk.many rp
-END(pio_phys_read_mmr)
-
-GLOBAL_ENTRY(pio_phys_write_mmr)
- .prologue
- .regstk 2,0,0,0
- .body
- mov r2=psr
- rsm psr.i | psr.dt
- ;;
- srlz.d
- st8.rel [r32]=r33
- ;;
- mov psr.l=r2;;
- srlz.d
- br.ret.sptk.many rp
-END(pio_phys_write_mmr)
-
-GLOBAL_ENTRY(pio_atomic_phys_write_mmrs)
- .prologue
- .regstk 4,0,0,0
- .body
- mov r2=psr
- cmp.ne p9,p0=r34,r0;
- rsm psr.i | psr.dt | psr.ic
- ;;
- srlz.d
- st8.rel [r32]=r33
-(p9) st8.rel [r34]=r35
- ;;
- mov psr.l=r2;;
- srlz.d
- br.ret.sptk.many rp
-END(pio_atomic_phys_write_mmrs)
-
-
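
The header comment asks for uncached physical addresses, i.e. the physical address with bit 63 set (the 0x80000... form; compare the dep into bit 63 in ptc_deadlock below). A usage sketch with a hypothetical MMR address and an assumed prototype:

    #include <linux/types.h>

    extern u64 pio_phys_read_mmr(volatile u64 *mmr);  /* assumed prototype */

    #define UNCACHED_PHYS(paddr)  ((paddr) | (1UL << 63))
    #define EXAMPLE_MMR_PADDR     0x100000UL          /* hypothetical */

    static u64 read_example_mmr(void)
    {
        return pio_phys_read_mmr((volatile u64 *)
                                 UNCACHED_PHYS(EXAMPLE_MMR_PADDR));
    }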
diff --git a/xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S b/xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S
deleted file mode 100644
index bebbcc4f8d..0000000000
--- a/xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <asm/types.h>
-#include <asm/sn/shub_mmr.h>
-
-#define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
-#define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
-#define ALIAS_OFFSET 8
-
-
- .global sn2_ptc_deadlock_recovery_core
- .proc sn2_ptc_deadlock_recovery_core
-
-sn2_ptc_deadlock_recovery_core:
- .regstk 6,0,0,0
-
- ptc0 = in0
- data0 = in1
- ptc1 = in2
- data1 = in3
- piowc = in4
- zeroval = in5
- piowcphy = r30
- psrsave = r2
- scr1 = r16
- scr2 = r17
- mask = r18
-
-
- extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address
- dep piowcphy=-1,piowcphy,63,1
- movl mask=WRITECOUNTMASK
- mov r8=r0
-
-1:
- cmp.ne p8,p9=r0,ptc1 // Test for shub type (ptc1 non-null on shub1)
- // p8 = 1 if shub1, p9 = 1 if shub2
-
- add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register
- mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
-(p8) st8.rel [scr2]=scr1;;
-(p9) ld8.acq scr1=[scr2];;
-
-5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete.
- hint @pause
- and scr2=scr1,mask;; // mask of writecount bits
- cmp.ne p6,p0=zeroval,scr2
-(p6) br.cond.sptk 5b
-
-
-
- ////////////// BEGIN PHYSICAL MODE ////////////////////
- mov psrsave=psr // Disable IC (no PMIs)
- rsm psr.i | psr.dt | psr.ic;;
- srlz.i;;
-
- st8.rel [ptc0]=data0 // Write PTC0 & wait for completion.
-
-5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
- hint @pause
- and scr2=scr1,mask;; // mask of writecount bits
- cmp.ne p6,p0=zeroval,scr2
-(p6) br.cond.sptk 5b;;
-
- tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
-(p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1
-
-(p7) st8.rel [ptc1]=data1;; // Now write PTC1.
-
-5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
- hint @pause
- and scr2=scr1,mask;; // mask of writecount bits
- cmp.ne p6,p0=zeroval,scr2
-(p6) br.cond.sptk 5b
-
- tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK
-
- mov psr.l=psrsave;; // Reenable IC
- srlz.i;;
- ////////////// END PHYSICAL MODE ////////////////////
-
-(p8) add r8=1,r8
-(p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred.
-
- br.ret.sptk rp
- .endp sn2_ptc_deadlock_recovery_core
diff --git a/xen/arch/ia64/linux/sn/pci/Makefile b/xen/arch/ia64/linux/sn/pci/Makefile
deleted file mode 100644
index 4c866bc776..0000000000
--- a/xen/arch/ia64/linux/sn/pci/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y += pcibr
diff --git a/xen/arch/ia64/linux/sn/pci/pcibr/Makefile b/xen/arch/ia64/linux/sn/pci/pcibr/Makefile
deleted file mode 100644
index 4039c25ab4..0000000000
--- a/xen/arch/ia64/linux/sn/pci/pcibr/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += pcibr_reg.o
diff --git a/xen/arch/ia64/linux/sn/pci/pcibr/README.origin b/xen/arch/ia64/linux/sn/pci/pcibr/README.origin
deleted file mode 100644
index 45d03b049c..0000000000
--- a/xen/arch/ia64/linux/sn/pci/pcibr/README.origin
+++ /dev/null
@@ -1,7 +0,0 @@
-Source files in this directory are identical copies of linux-2.6.19 files:
-
-NOTE: DO NOT commit changes to these files! If a file
-needs to be changed, move it to ../linux-xen and follow
-the instructions in the README there.
-
-pcibr_reg.c -> linux/arch/ia64/sn/pci/pcibr/pcibr_reg.c
diff --git a/xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c b/xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c
deleted file mode 100644
index 8b8bbd51d4..0000000000
--- a/xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <asm/sn/io.h>
-#include <asm/sn/pcibr_provider.h>
-#include <asm/sn/pcibus_provider_defs.h>
-#include <asm/sn/pcidev.h>
-#include <asm/sn/pic.h>
-#include <asm/sn/tiocp.h>
-
-union br_ptr {
- struct tiocp tio;
- struct pic pic;
-};
-
-/*
- * Control Register Access -- Read/Write 0000_0020
- */
-void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- __sn_clrq_relaxed(&ptr->tio.cp_control, bits);
- break;
- case PCIBR_BRIDGETYPE_PIC:
- __sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
- break;
- default:
- panic
- ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
- ptr);
- }
- }
-}
-
-void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- __sn_setq_relaxed(&ptr->tio.cp_control, bits);
- break;
- case PCIBR_BRIDGETYPE_PIC:
- __sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
- break;
- default:
- panic
- ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
- ptr);
- }
- }
-}
-
-/*
- * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
- */
-u64 pcireg_tflush_get(struct pcibus_info *pcibus_info)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
- u64 ret = 0;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
- break;
- case PCIBR_BRIDGETYPE_PIC:
- ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
- break;
- default:
- panic
- ("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
- ptr);
- }
- }
-
- /* Read of the Target Flush should always return zero */
- if (ret != 0)
- panic("pcireg_tflush_get:Target Flush failed\n");
-
- return ret;
-}
-
-/*
- * Interrupt Status Register Access -- Read Only 0000_0100
- */
-u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
- u64 ret = 0;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
- break;
- case PCIBR_BRIDGETYPE_PIC:
- ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
- break;
- default:
- panic
- ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
- ptr);
- }
- }
- return ret;
-}
-
-/*
- * Interrupt Enable Register Access -- Read/Write 0000_0108
- */
-void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- __sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
- break;
- case PCIBR_BRIDGETYPE_PIC:
- __sn_clrq_relaxed(&ptr->pic.p_int_enable, bits);
- break;
- default:
- panic
- ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
- ptr);
- }
- }
-}
-
-void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- __sn_setq_relaxed(&ptr->tio.cp_int_enable, bits);
- break;
- case PCIBR_BRIDGETYPE_PIC:
- __sn_setq_relaxed(&ptr->pic.p_int_enable, bits);
- break;
- default:
- panic
- ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
- ptr);
- }
- }
-}
-
-/*
- * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
- */
-void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
- u64 addr)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- __sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n],
- TIOCP_HOST_INTR_ADDR);
- __sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n],
- (addr & TIOCP_HOST_INTR_ADDR));
- break;
- case PCIBR_BRIDGETYPE_PIC:
- __sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n],
- PIC_HOST_INTR_ADDR);
- __sn_setq_relaxed(&ptr->pic.p_int_addr[int_n],
- (addr & PIC_HOST_INTR_ADDR));
- break;
- default:
- panic
- ("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
- ptr);
- }
- }
-}
-
-/*
- * Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
- */
-void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- writeq(1, &ptr->tio.cp_force_pin[int_n]);
- break;
- case PCIBR_BRIDGETYPE_PIC:
- writeq(1, &ptr->pic.p_force_pin[int_n]);
- break;
- default:
- panic
- ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
- ptr);
- }
- }
-}
-
-/*
- * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
- */
-u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
- u64 ret = 0;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- ret =
- __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]);
- break;
- case PCIBR_BRIDGETYPE_PIC:
- ret =
- __sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]);
- break;
- default:
- panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", ptr);
- }
-
- }
- /* Read of the Write Buffer Flush should always return zero */
- return ret;
-}
-
-void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
- u64 val)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]);
- break;
- case PCIBR_BRIDGETYPE_PIC:
- writeq(val, &ptr->pic.p_int_ate_ram[ate_index]);
- break;
- default:
- panic
- ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
- ptr);
- }
- }
-}
-
-u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
-{
- union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
- u64 __iomem *ret = NULL;
-
- if (pcibus_info) {
- switch (pcibus_info->pbi_bridge_type) {
- case PCIBR_BRIDGETYPE_TIOCP:
- ret = &ptr->tio.cp_int_ate_ram[ate_index];
- break;
- case PCIBR_BRIDGETYPE_PIC:
- ret = &ptr->pic.p_int_ate_ram[ate_index];
- break;
- default:
- panic
- ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
- ptr);
- }
- }
- return ret;
-}
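
All of the accessors above repeat the same TIOCP/PIC dispatch. As a sketch (a hypothetical helper, not in the deleted file), the shared shape is:

    /* Select the per-bridge register once, then apply one operation.
     * The deleted file open-codes this switch in every accessor. */
    static u64 __iomem *bridge_reg(struct pcibus_info *pcibus_info,
                                   u64 __iomem *tiocp_reg,
                                   u64 __iomem *pic_reg)
    {
        switch (pcibus_info->pbi_bridge_type) {
        case PCIBR_BRIDGETYPE_TIOCP:
            return tiocp_reg;
        case PCIBR_BRIDGETYPE_PIC:
            return pic_reg;
        default:
            panic("bridge_reg: unknown bridgetype bridge");
        }
    }

so that, for example, pcireg_intr_enable_bit_set() would reduce to __sn_setq_relaxed(bridge_reg(pcibus_info, &ptr->tio.cp_int_enable, &ptr->pic.p_int_enable), bits).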
diff --git a/xen/arch/ia64/linux/strlen.S b/xen/arch/ia64/linux/strlen.S
deleted file mode 100644
index e0cdac0a85..0000000000
--- a/xen/arch/ia64/linux/strlen.S
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- *
- * Optimized version of the standard strlen() function
- *
- *
- * Inputs:
- * in0 address of string
- *
- * Outputs:
- * ret0 the number of characters in the string (0 if empty string)
- * does not count the \0
- *
- * Copyright (C) 1999, 2001 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * 09/24/99 S.Eranian add speculation recovery code
- */
-
-#include <asm/asmmacro.h>
-
-//
-//
-// This is an enhanced version of the basic strlen. It includes a combination
-// of compute zero index (czx), parallel comparisons, speculative loads, and
-// loop unrolling using rotating registers.
-//
-// General Ideas about the algorithm:
-// The goal is to look at the string in chunks of 8 bytes,
-// so we need to do a few extra checks at the beginning because the
-// string may not be 8-byte aligned. In this case we load the 8-byte
-// quantity which includes the start of the string and mask the unused
-// bytes with 0xff to avoid confusing czx.
-// We use speculative loads and software pipelining to hide memory
-// latency and do read ahead safely. This way we defer any exception.
-//
-// Because we don't want the kernel to rely on particular
-// settings of the DCR register, we provide recovery code in case
-// speculation fails. The recovery code is going to "redo" the work using
-// only normal loads. If we still get a fault then we generate a
-// kernel panic. Otherwise we return the strlen as usual.
-//
-// The fact that speculation may fail can be caused, for instance, by
-// the DCR.dm bit being set. In this case TLB misses are deferred, i.e.,
-// a NaT bit will be set if the translation is not present. The normal
-// load, on the other hand, will cause the translation to be inserted
-// if the mapping exists.
-//
-// It should be noted that we execute recovery code only when we need
-// to use the data that has been speculatively loaded: we don't execute
-// recovery code on pure read ahead data.
-//
-// Remarks:
-// - the cmp r0,r0 is used as a fast way to initialize a predicate
-// register to 1. This is required to make sure that we get the parallel
-// compare correct.
-//
-// - we don't use the epilogue counter to exit the loop but we need to set
-// it to zero beforehand.
-//
-// - after the loop we must test for NaT values because neither the
-//   czx nor cmp instructions raise a NaT consumption fault. We must be
-//   careful not to look too far for a NaT we don't care about.
-// For instance we don't need to look at a NaT in val2 if the zero byte
-// was in val1.
-//
-// - Clearly performance tuning is required.
-//
-//
-//
-#define saved_pfs r11
-#define tmp r10
-#define base r16
-#define orig r17
-#define saved_pr r18
-#define src r19
-#define mask r20
-#define val r21
-#define val1 r22
-#define val2 r23
-
-GLOBAL_ENTRY(strlen)
- .prologue
- .save ar.pfs, saved_pfs
- alloc saved_pfs=ar.pfs,11,0,0,8 // rotating must be multiple of 8
-
- .rotr v[2], w[2] // declares our 4 aliases
-
- extr.u tmp=in0,0,3 // tmp=least significant 3 bits
-	mov orig=in0		// keep track of the initial byte address
- dep src=0,in0,0,3 // src=8byte-aligned in0 address
- .save pr, saved_pr
- mov saved_pr=pr // preserve predicates (rotation)
- ;;
-
- .body
-
- ld8 v[1]=[src],8 // must not speculate: can fail here
- shl tmp=tmp,3 // multiply by 8bits/byte
- mov mask=-1 // our mask
- ;;
- ld8.s w[1]=[src],8 // speculatively load next
- cmp.eq p6,p0=r0,r0 // sets p6 to true for cmp.and
-	sub tmp=64,tmp		// how many bits to shift our mask to the right
- ;;
-	shr.u mask=mask,tmp	// clear the mask bits that cover v[1]'s valid bytes
- mov ar.ec=r0 // clear epilogue counter (saved in ar.pfs)
- ;;
- add base=-16,src // keep track of aligned base
- or v[1]=v[1],mask // now we have a safe initial byte pattern
- ;;
-1:
- ld8.s v[0]=[src],8 // speculatively load next
- czx1.r val1=v[1] // search 0 byte from right
- czx1.r val2=w[1] // search 0 byte from right following 8bytes
- ;;
- ld8.s w[0]=[src],8 // speculatively load next to next
- cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
-	cmp.eq.and p6,p0=8,val2	// p6 = p6 and val2==8
-(p6) br.wtop.dptk 1b // loop until p6 == 0
- ;;
- //
-	// We must try the recovery code iff
- // val1_is_nat || (val1==8 && val2_is_nat)
- //
- // XXX Fixme
- // - there must be a better way of doing the test
- //
-	cmp.eq p8,p9=8,val1	// p8 = no zero byte in val1 (disambiguate)
- tnat.nz p6,p7=val1 // test NaT on val1
-(p6) br.cond.spnt .recover // jump to recovery if val1 is NaT
- ;;
- //
-	// if we come here p7 is true, i.e., initialized for the cmp
-	//
-	cmp.eq.and p7,p0=8,val1	// val1==8?
- tnat.nz.and p7,p0=val2 // test NaT if val2
-(p7) br.cond.spnt .recover // jump to recovery if val2 is NaT
- ;;
-(p8) mov val1=val2 // the other test got us out of the loop
-(p8) adds src=-16,src // correct position when 3 ahead
-(p9) adds src=-24,src // correct position when 4 ahead
- ;;
- sub ret0=src,orig // distance from base
- sub tmp=8,val1 // which byte in word
- mov pr=saved_pr,0xffffffffffff0000
- ;;
- sub ret0=ret0,tmp // adjust
- mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
- br.ret.sptk.many rp // end of normal execution
-
- //
- // Outlined recovery code when speculation failed
- //
- // This time we don't use speculation and rely on the normal exception
-	// mechanism. That's why the loop is not as good as the previous one
- // because read ahead is not possible
- //
- // IMPORTANT:
- // Please note that in the case of strlen() as opposed to strlen_user()
- // we don't use the exception mechanism, as this function is not
- // supposed to fail. If that happens it means we have a bug and the
-	// code will cause a kernel fault.
- //
- // XXX Fixme
- // - today we restart from the beginning of the string instead
- // of trying to continue where we left off.
- //
-.recover:
- ld8 val=[base],8 // will fail if unrecoverable fault
- ;;
- or val=val,mask // remask first bytes
- cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop
- ;;
- //
- // ar.ec is still zero here
- //
-2:
-(p6) ld8 val=[base],8 // will fail if unrecoverable fault
- ;;
- czx1.r val1=val // search 0 byte from right
- ;;
- cmp.eq p6,p0=8,val1 // val1==8 ?
-(p6) br.wtop.dptk 2b // loop until p6 == 0
- ;; // (avoid WAW on p63)
- sub ret0=base,orig // distance from base
- sub tmp=8,val1
- mov pr=saved_pr,0xffffffffffff0000
- ;;
- sub ret0=ret0,tmp // length=now - back -1
- mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
- br.ret.sptk.many rp // end of successful recovery code
-END(strlen)
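
For readers without the IA-64 reference handy, here is a rough C rendering of
the 8-byte-chunk scan above (a sketch only: no speculation and no recovery
code, and it assumes a little-endian machine where reading the whole aligned
word around the string is harmless, just as the assembly does; the classic
zero-byte bit trick stands in for czx1.r):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	size_t strlen_by_words(const char *s)
	{
		const uint64_t ones  = 0x0101010101010101ULL;
		const uint64_t highs = 0x8080808080808080ULL;
		uintptr_t addr = (uintptr_t)s & ~(uintptr_t)7; /* 8-byte-aligned base */
		unsigned shift = ((uintptr_t)s & 7) * 8;       /* bits before the string */
		uint64_t w;

		memcpy(&w, (const void *)addr, 8);
		if (shift)	/* like "or v[1]=v[1],mask": 0xff the pre-string bytes */
			w |= ~0ULL >> (64 - shift);
		for (;;) {
			/* nonzero iff w contains a zero byte (what czx1.r detects) */
			uint64_t zero = (w - ones) & ~w & highs;
			if (zero) {
				/* index of the first zero byte (little-endian) */
				unsigned byte = __builtin_ctzll(zero) / 8;
				return addr + byte - (uintptr_t)s;
			}
			addr += 8;
			memcpy(&w, (const void *)addr, 8);
		}
	}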
diff --git a/xen/arch/ia64/tools/README.RunVT b/xen/arch/ia64/tools/README.RunVT
deleted file mode 100644
index 0e0848e53f..0000000000
--- a/xen/arch/ia64/tools/README.RunVT
+++ /dev/null
@@ -1,46 +0,0 @@
-INSTRUCTIONS FOR Running IPF/Xen with a VT-enabled Tiger4 platform
-
-1. Install a Linux Disk, VT_Disk, to be used by VT
-2. Setup the target VT_Disk
- 1. Boot VT_Disk
-	2. modify the following files on VT_Disk
- /boot/efi/efi/redhat/elilo.conf -
- modify "append=" line to have "root=/dev/hda3"
-		** note: /dev/hda3 must reflect VT_Disk's root (/) partition
-
- /etc/fstab -
-		LABEL=/     /    ext3    defaults    1 1
-	   to
-		/dev/hda3   /    ext3    defaults    1 1
- and other entries accordingly
-3. Install Xen and boot XenLinux on your standard Linux disk
- 1. modify /boot/efi/efi/redhat/elilo.conf -
- "append=" entry to have "root=/dev/sda3"
- 2. modify /etc/fstab -
-		LABEL=/     /    ext3    defaults    1 1
-	   to
-		/dev/sda3   /    ext3    defaults    1 1
- and other entries accordingly
-4. Reboot XenLinux with VT_Disk in /dev/sdb slot
- 1. copy Guest_Firmware.bin into /usr/lib/xen/boot/guest_firmware.bin
- 2. modify /etc/xen/xmexample.vti
- disk = [ 'phy:/dev/sdb,ioemu:hda,w' ]
- and make sure
- kernel=/usr/lib/xen/boot/guest_firmware.bin
-5. Make sure XenLinux has SDL installed by
- > rpm -q -a | grep SDL
- SDL-1.2.7-8 SDL-devel-1.2.7-8
-6. Start vncserver from XenLinux
- 1. ifconfig to get XenLinux IP address
- 2. vncserver
-7. Start VT Domain
- 1. From a remote system connect to XenLinux through vnc viewer
- 2. On vnc windows
- > xend start
- > xm create /etc/xen/xmexample.vti
-	   an EFI shell will pop up
- > fs0:
- fs0:> cd efi\redhat
- fs0:> elilo linux
-
-
diff --git a/xen/arch/ia64/tools/README.xenia64 b/xen/arch/ia64/tools/README.xenia64
deleted file mode 100644
index 6eef8d26be..0000000000
--- a/xen/arch/ia64/tools/README.xenia64
+++ /dev/null
@@ -1,98 +0,0 @@
-# Recipe of Booting up Xen/dom0/domU on IA64 system
-# 06/02/2006 Written by Xen-ia64-devel community.
-
-My environment is:
- Machine : Tiger4
- Domain0 OS : RHEL4 U2
- DomainU OS : RHEL4 U2
-
-----------------------------
-Build xen
- 1. Download source
- # hg clone http://xenbits.xensource.com/ext/ia64/xen-unstable.hg
- # cd xen-unstable.hg
- # hg clone http://xenbits.xensource.com/ext/ia64/linux-2.6.18-xen.hg
-
- 2. # make world
-
- 3. # make install-tools
-
- 4. copy kernels and xen
- # cp xen/xen.gz /boot/efi/efi/redhat/
- # cp build-linux-2.6.18-xen_ia64/vmlinux.gz /boot/efi/efi/redhat/vmlinuz-2.6.18.8-xen
-
- 5. make initrd for Dom0/DomU
- # make -C linux-2.6.18-xen.hg ARCH=ia64 modules_install O=$(/bin/pwd)/build-linux-2.6.18-xen_ia64
- # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6.18.8-xen.img 2.6.18.8-xen --builtin mptbase --builtin mptscsih
-
----------------------------------
-Make OSimg for DomU
-	1. make the disk image file
- # dd if=/dev/zero of=/root/rhel4.img bs=1M seek=4096 count=0
- # mke2fs -F -j /root/rhel4.img
- # mount -o loop /root/rhel4.img /mnt
- # cp -ax /{dev,var,etc,usr,bin,sbin,lib} /mnt
- # mkdir /mnt/{root,proc,sys,home,tmp}
-
- 2. modify DomU's fstab
- # vi /mnt/etc/fstab
- /dev/xvda1 / ext3 defaults 1 1
- none /dev/pts devpts gid=5,mode=620 0 0
- none /dev/shm tmpfs defaults 0 0
- none /proc proc defaults 0 0
- none /sys sysfs defaults 0 0
-
- 3. modify inittab
- set runlevel to 3 to avoid X trying to start
- # vi /mnt/etc/inittab
- id:3:initdefault:
- Start a getty on the xvc0 console
- X0:2345:respawn:/sbin/mingetty xvc0
-	  the tty1-6 mingetty entries can be commented out
-
- 4. add xvc0 into /etc/securetty
- # vi /mnt/etc/securetty (add xvc0)
-
- 5. umount
- # umount /mnt
-
--------------------------------------
-Boot Xen & Domain0
- 1. replace elilo
-	 download it from the site below
- http://elilo.sourceforge.net/cgi-bin/blosxom
- and copy into /boot/efi/efi/redhat/
- # cp elilo-3.6-ia64.efi /boot/efi/efi/redhat/elilo.efi
-
-
- 2. modify elilo.conf (like the below)
- # vi /boot/efi/efi/redhat/elilo.conf
- prompt
- timeout=20
- default=xen
- relocatable
-
- image=vmlinuz-2.6.18.8-xen
- label=xen
- vmm=xen.gz
- initrd=initrd-2.6.18.8-xen.img
- read-only
- append=" -- rhgb root=/dev/sda2"
-
--------------------------------------
-Boot DomainU
- 1. make config of DomU
- # vi /etc/xen/rhel4
- kernel = "/boot/efi/efi/redhat/vmlinuz-2.6.18.8-xen"
- ramdisk = "/boot/efi/efi/redhat/initrd-2.6.18.8-xen.img"
- memory = 384
- name = "rhel4"
- disk = [ 'file:/root/rhel4.img,xvda1,w' ]
- root = "/dev/xvda1 ro"
-
- 2. After boot xen and dom0, start xend
- # /etc/init.d/xend start
- ( In the debugging case, # XEND_DEBUG=1 xend trace_start )
-
- 3. start domU
- # xm create -c rhel4
diff --git a/xen/arch/ia64/tools/README.xenoprof b/xen/arch/ia64/tools/README.xenoprof
deleted file mode 100644
index bdb6c25cc3..0000000000
--- a/xen/arch/ia64/tools/README.xenoprof
+++ /dev/null
@@ -1,154 +0,0 @@
- Xenoprof/IA64 HOWTO
-
- Written by Isaku Yamahata <yamahata at valinux co jp>
- 15th Jan, 2008
-
-
-Introduction
-------------
-This document describes how to use xenoprof/ia64.
-See oprofile site for the details of oprofile itself.
-
-
-Reference
----------
-oprofile
-http://oprofile.sourceforge.net/news/
-
-xenoprof
-http://xenoprof.sourceforge.net/
-
-
-Requirement
------------
-- xen VMM
- xen-ia64-devel.hg c/s 16632:2900e4dacaa7 or later
- Probably xen 3.2 or later would be OK.
-
-- dom0/domU Linux
- linux kernel tree corresponding to xen VMM.
-
-- Oprofile
- oprofile 0.9.3 or later
- get the patch for oprofile 0.9.3 from http://xenoprof.sourceforge.net/
- oprofile-0.9.3-xen-r2.patch or later
-  NOTE: The xenoprof/IA64-specific part is already included in the
-  development CVS tree.
-
-- Documentation
- You can find the documentation from http://xenoprof.sourceforge.net/.
-  It doesn't cover the IA64-specific parts, but most of it applies.
-
-
-Options to opcontrol
---------------------
-You have to tell the oprofile daemon that the session is a xenoprof one
-by passing the "--xen" option.
---xen=<xen_image_file>
- Specify the xen image.
---active-domains=<list>
- Specify active domains
---passive-domains=<list>
- Specify passive domains
-
-
-Examples
---------
-- dom0 active, dom1 passive case example
- on dom0
-  # opcontrol --start-daemon --xen=<path to xen-syms> \
- --vmlinux=<path to vmlinux> \
- --active-domains=0 --passive-domains=1
- # opcontrol --start
-
- <make activity you want>
-
- # opcontrol --stop (or opcontrol --shutdown)
- # opreport -l or something to get the result
-
-
-- both dom0 and dom1 active example
- on dom0
-  # opcontrol --start-daemon --xen=<path to xen-syms> \
- --vmlinux=<path to vmlinux> \
- --active-domains=0,1
-
- on dom1
-  # opcontrol --start-daemon --xen=<path to xen-syms> \
- --vmlinux=<path to vmlinux>
-  domain1 isn't the primary domain, so --active-domains/--passive-domains
-  shouldn't be specified.
-
- on dom0
- # opcontrol --start
- on dom1
- # opcontrol --start
-
- <make activity you want>
-
- on dom1
- # opcontrol --stop (or opcontrol --shutdown)
- on dom0
- # opcontrol --stop (or opcontrol --shutdown)
-
- on dom0
- # opreport -l or something to get the result of dom0
- on dom1
- # opreport -l or something to get the result of dom1
-
-
-Result example
---------------
-The app name of a non-dom0 domain is domain<N>-{xen, kernel, modules, app},
-where N is the domain id.
-You may want to create domain<N>-xen and domain<N>-kernel to get symbol names.
-
-# opreport
-CPU: Itanium 2, speed 1595 MHz (estimated)
-Counted L2DTLB_MISSES events (L2DTLB Misses) with a unit mask of 0x00 (No unit mask) count 5000
-L2DTLB_MISSES:...|
- samples| %|
-------------------
- 242 40.2662 domain1-kernel
- 176 29.2845 domain1-xen
- 128 21.2978 domain1-apps
- 55 9.1514 xen-syms
-
-# opreport -l
-CPU: Itanium 2, speed 1595 MHz (estimated)
-Counted L2DTLB_MISSES events (L2DTLB Misses) with a unit mask of 0x00 (No unit mask) count 5000
-warning: /boot/domain1-xen could not be found.
-warning: /domain1-apps could not be found.
-warning: /domain1-kernel could not be found.
-samples % app name symbol name
-242 40.2662 domain1-kernel (no symbols)
-176 29.2845 domain1-xen (no symbols)
-128 21.2978 domain1-apps (no symbols)
-16 2.6622 xen-syms context_switch
-16 2.6622 xen-syms lookup_domain_mpa
-7 1.1647 xen-syms vcpu_get_domain_bundle
-3 0.4992 xen-syms do_dom0vp_op
-3 0.4992 xen-syms lookup_noalloc_domain_pte
-3 0.4992 xen-syms xencomm_get_page
-2 0.3328 xen-syms __copy_user
-2 0.3328 xen-syms vcpu_translate
-1 0.1664 xen-syms ia64_frametable_probe
-1 0.1664 xen-syms vcpu_wake
-1 0.1664 xen-syms xencomm_ctxt_init
-
-
-Limitations
------------
-- Don't create/destroy/save/restore/live-migrate domains during a xenoprof
-  session; otherwise something may go wrong (including a Xen VMM hang).
-  This isn't ia64-specific.
-  If you want to profile the early boot phase, "xm create -p"/"xm unpause"
-  is your friend.
-
-- Currently only the generic PMCs/PMDs are supported.
-  The CPU-implementation-specific PMCs/PMDs aren't supported.
-
-- Call graph isn't supported yet.
-
-- Using an HVM domain as an active domain isn't supported.
-  The Xen/IA64 VMM itself supports it, but the driver doesn't exist.
diff --git a/xen/arch/ia64/tools/linux-xen-diffs b/xen/arch/ia64/tools/linux-xen-diffs
deleted file mode 100644
index 939ba47861..0000000000
--- a/xen/arch/ia64/tools/linux-xen-diffs
+++ /dev/null
@@ -1,25 +0,0 @@
-# generate a patch for all the files in linux-xen directories
-# (these are files that are identical to linux except for a few small changes)
-# run in the main xen directory
-LINUXPATH=/home/djm/linux-2.6.13
-OUTFILE=/tmp/linux-xen-diffs
-wd=$PWD
-for i in include/asm-ia64/linux-xen/asm include/asm-ia64/linux-xen/linux arch/ia64/linux-xen
-do
-echo '#############' $i '#############'
-cd $i
-ln -s $LINUXPATH xxx
-cat README.origin | grep -v '^#' | grep -v '^[ ]*$' | \
- sed -e 's/[ ]*-> linux/ xxx/' | \
- sed -e 's/^/diff -Naur /' | bash
-rm -f xxx
-cd $wd
-done > $OUTFILE
-echo 'Total lines in patch:' `cat $OUTFILE | grep -v '########' | wc -l`
-echo 'Approx. changes in patch:' `cat $OUTFILE | grep -v '########' | \
- grep -- '-#if' | wc -l`
-echo 'Total lines added by patch:' `cat $OUTFILE | grep -v '########' | \
- grep -- '^-' | wc -l`
-echo 'Total non-conditional-compile lines added by patch:' \
- `cat $OUTFILE | grep -v '########' | grep -- '^-' | \
- egrep -v '^-#if|^-#else|^-#endif' | wc -l`
diff --git a/xen/arch/ia64/tools/p2m_expose/Makefile b/xen/arch/ia64/tools/p2m_expose/Makefile
deleted file mode 100644
index 8b450d9900..0000000000
--- a/xen/arch/ia64/tools/p2m_expose/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-ifneq ($(KERNELRELEASE),)
-obj-m += expose_p2m.o
-else
-PWD := $(shell pwd)
-TOPDIR ?= $(abspath $(PWD)/../../../../..)
-KVER ?= $(shell awk '/^LINUX_VER\>/{print $$3}' $(TOPDIR)/buildconfigs/mk.linux-2.6-xen)
-KDIR ?= $(TOPDIR)/linux-$(KVER)-xen
-#CROSS_COMPILE ?= ia64-unknown-linux-
-#ARCH ?= ia64
-
-ifneq ($(O),)
-OPT_O := O=$(realpath $(O))
-else
-OPT_O ?= O=$(TOPDIR)/build-linux-$(KVER)-xen_ia64
-endif
-
-ifneq ($(V),)
-OPT_V := V=$(V)
-endif
-
-ifneq ($(ARCH),)
-OPT_ARCH := ARCH=$(ARCH)
-endif
-
-ifneq ($(CROSS_COMPILE),)
-OPT_CROSS_COMPILE := CROSS_COMPILE=$(CROSS_COMPILE)
-endif
-
-default:
-	$(MAKE) -C $(KDIR) $(OPT_O) $(OPT_V) $(OPT_CROSS_COMPILE) $(OPT_ARCH) M=$(PWD)
-endif
diff --git a/xen/arch/ia64/tools/p2m_expose/README.p2m_expose b/xen/arch/ia64/tools/p2m_expose/README.p2m_expose
deleted file mode 100644
index 3b51e11305..0000000000
--- a/xen/arch/ia64/tools/p2m_expose/README.p2m_expose
+++ /dev/null
@@ -1,12 +0,0 @@
-This directory contains a Linux kernel module for p2m exposure testing/benchmarking.
-
-1. build the kernel module
-	- First, build linux-xen as usual.
-	- Then just type 'make' in this directory and you'll have expose_p2m.ko.
-	  See the Makefile for details.
-
-2. test and benchmark
-	- Type 'insmod expose_p2m.ko' on the system.
-	  The results are printed to your console.
-	  insmod intentionally fails with EINVAL, so you don't have to run rmmod.
-
diff --git a/xen/arch/ia64/tools/p2m_expose/expose_p2m.c b/xen/arch/ia64/tools/p2m_expose/expose_p2m.c
deleted file mode 100644
index 9ab2a5578d..0000000000
--- a/xen/arch/ia64/tools/p2m_expose/expose_p2m.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/******************************************************************************
- * arch/ia64/xen/expose_p2m.c
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/hypercall.h>
-#include <asm/hypervisor.h>
-
-#define printd(fmt, ...) printk("%s:%d " fmt, __func__, __LINE__, \
- ##__VA_ARGS__)
-
-// Copied from arch/ia64/mm/tlb.c; it isn't exported there.
-void
-local_flush_tlb_all (void)
-{
- unsigned long i, j, flags, count0, count1, stride0, stride1, addr;
-
- addr = local_cpu_data->ptce_base;
- count0 = local_cpu_data->ptce_count[0];
- count1 = local_cpu_data->ptce_count[1];
- stride0 = local_cpu_data->ptce_stride[0];
- stride1 = local_cpu_data->ptce_stride[1];
-
- local_irq_save(flags);
- for (i = 0; i < count0; ++i) {
- for (j = 0; j < count1; ++j) {
- ia64_ptce(addr);
- addr += stride1;
- }
- addr += stride0;
- }
- local_irq_restore(flags);
- ia64_srlz_i(); /* srlz.i implies srlz.d */
-}
-
-static void
-do_p2m(unsigned long (*conv)(unsigned long),
- const char* msg, const char* prefix,
-       unsigned long start_gpfn, unsigned long end_gpfn, unsigned long stride)
-{
- struct timeval before_tv;
- struct timeval after_tv;
- unsigned long gpfn;
- unsigned long mfn;
- unsigned long count;
- s64 nsec;
-
- count = 0;
- do_gettimeofday(&before_tv);
- for (gpfn = start_gpfn; gpfn < end_gpfn; gpfn += stride) {
- mfn = (*conv)(gpfn);
- count++;
- }
- do_gettimeofday(&after_tv);
- nsec = timeval_to_ns(&after_tv) - timeval_to_ns(&before_tv);
- printk("%s stride %4ld %s: %9ld / %6ld = %5ld nsec\n",
- msg, stride, prefix,
- nsec, count, nsec/count);
-}
-
-
-static void
-do_with_hypercall(const char* msg,
- unsigned long start_gpfn, unsigned long end_gpfn,
- unsigned long stride)
-{
- do_p2m(&HYPERVISOR_phystomach, msg, "hypercall",
- start_gpfn, end_gpfn, stride);
-}
-
-static void
-do_with_table(const char* msg,
- unsigned long start_gpfn, unsigned long end_gpfn,
- unsigned long stride)
-{
- do_p2m(&p2m_phystomach, msg, "p2m table",
- start_gpfn, end_gpfn, stride);
-}
-
-static int __init
-expose_p2m_init(void)
-{
- unsigned long gpfn;
- unsigned long mfn;
- unsigned long p2m_mfn;
-
- int error_count = 0;
-
- const int strides[] = {
- PTRS_PER_PTE, PTRS_PER_PTE/2, PTRS_PER_PTE/3, PTRS_PER_PTE/4,
- L1_CACHE_BYTES/sizeof(pte_t), 1
- };
- int i;
-
-
-#if 0
- printd("about to call p2m_expose_init()\n");
- if (p2m_expose_init() < 0) {
- printd("p2m_expose_init() failed\n");
- return -EINVAL;
- }
- printd("p2m_expose_init() success\n");
-#else
- if (!p2m_initialized) {
- printd("p2m exposure isn't initialized\n");
- return -EINVAL;
- }
-#endif
-
- printd("p2m expose test begins\n");
- for (gpfn = p2m_min_low_pfn; gpfn < p2m_max_low_pfn; gpfn++) {
- mfn = HYPERVISOR_phystomach(gpfn);
- p2m_mfn = p2m_phystomach(gpfn);
- if (mfn != p2m_mfn) {
- printd("gpfn 0x%016lx "
- "mfn 0x%016lx p2m_mfn 0x%016lx\n",
- gpfn, mfn, p2m_mfn);
- printd("mpaddr 0x%016lx "
- "maddr 0x%016lx p2m_maddr 0x%016lx\n",
- gpfn << PAGE_SHIFT,
- mfn << PAGE_SHIFT, p2m_mfn << PAGE_SHIFT);
-
- error_count++;
- if (error_count > 16) {
- printk("too many errors\n");
- return -EINVAL;
- }
- }
- }
- printd("p2m expose test done!\n");
-
- printk("type "
- "stride "
- "type : "
- " nsec / count = "
- "nsec per conv\n");
- for (i = 0; i < sizeof(strides)/sizeof(strides[0]); i++) {
- int stride = strides[i];
- local_flush_tlb_all();
- do_with_hypercall("cold tlb",
- p2m_min_low_pfn, p2m_max_low_pfn, stride);
- do_with_hypercall("warm tlb",
- p2m_min_low_pfn, p2m_max_low_pfn, stride);
-
- local_flush_tlb_all();
- do_with_table("cold tlb",
- p2m_min_low_pfn, p2m_max_low_pfn, stride);
- do_with_table("warm tlb",
- p2m_min_low_pfn, p2m_max_low_pfn, stride);
- }
-
- return -EINVAL;
-}
-
-static void __exit
-expose_p2m_cleanup(void)
-{
-}
-
-module_init(expose_p2m_init);
-module_exit(expose_p2m_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Isaku Yamahata <yamahata@valinux.co.jp>");
diff --git a/xen/arch/ia64/tools/privify/Makefile b/xen/arch/ia64/tools/privify/Makefile
deleted file mode 100644
index 9283c0b20d..0000000000
--- a/xen/arch/ia64/tools/privify/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-privify: privify_elf64.o privify.o
- gcc -g privify.o privify_elf64.o -o privify
-
-
-privify_elf64.o: privify_elf64.c
- gcc -g -D__KERNEL__ -c privify_elf64.c
-
-privify.o: privify.c
- gcc -nostdinc -g -D__KERNEL__ -c privify.c
diff --git a/xen/arch/ia64/tools/privify/README.privify b/xen/arch/ia64/tools/privify/README.privify
deleted file mode 100644
index 77e3b00449..0000000000
--- a/xen/arch/ia64/tools/privify/README.privify
+++ /dev/null
@@ -1,8 +0,0 @@
-In this directory, just "make".
-
-Run the resulting program on a vmlinux that has been adjusted
-to run on Xen (see arch/ia64/tools/README.xenia64linux):
-
- ./privify vmlinux xenlinux
-
-Use the resulting xenlinux file as the domain0 kernel.
diff --git a/xen/arch/ia64/tools/privify/privify.c b/xen/arch/ia64/tools/privify/privify.c
deleted file mode 100644
index 2b10186778..0000000000
--- a/xen/arch/ia64/tools/privify/privify.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Binary translate privilege-sensitive ops to privileged
- *
- * Copyright (C) 2004 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- */
-
-#include "privify.h"
-
-typedef unsigned long long u64;
-typedef unsigned long long IA64_INST;
-
-typedef union U_IA64_BUNDLE {
- u64 i64[2];
- struct { u64 template:5,slot0:41,slot1a:18,slot1b:23,slot2:41; };
- // NOTE: following doesn't work because bitfields can't cross natural
- // size boundaries
- //struct { u64 template:5, slot0:41, slot1:41, slot2:41; };
-} IA64_BUNDLE;
-
-typedef enum E_IA64_SLOT_TYPE { I, M, F, B, L, ILLEGAL } IA64_SLOT_TYPE;
-
-typedef union U_INST64_A5 {
- IA64_INST inst;
- struct { u64 qp:6, r1:7, imm7b:7, r3:2, imm5c:5, imm9d:9, s:1, major:4; };
-} INST64_A5;
-
-typedef union U_INST64_B4 {
- IA64_INST inst;
- struct { u64 qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6, wh:2, d:1, un1:1, major:4; };
-} INST64_B4;
-
-typedef union U_INST64_B8 {
- IA64_INST inst;
- struct { u64 qp:6, un21:21, x6:6, un4:4, major:4; };
-} INST64_B8;
-
-typedef union U_INST64_B9 {
- IA64_INST inst;
- struct { u64 qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
-} INST64_B9;
-
-typedef union U_INST64_I19 {
- IA64_INST inst;
- struct { u64 qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
-} INST64_I19;
-
-typedef union U_INST64_I26 {
- IA64_INST inst;
- struct { u64 qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4;};
-} INST64_I26;
-
-typedef union U_INST64_I27 {
- IA64_INST inst;
- struct { u64 qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4;};
-} INST64_I27;
-
-typedef union U_INST64_I28 { // not privileged (mov from AR)
- IA64_INST inst;
- struct { u64 qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4;};
-} INST64_I28;
-
-typedef union U_INST64_M28 {
- IA64_INST inst;
- struct { u64 qp:6, :14, r3:7, x6:6, x3:3, :1, major:4;};
-} INST64_M28;
-
-typedef union U_INST64_M29 {
- IA64_INST inst;
- struct { u64 qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4;};
-} INST64_M29;
-
-typedef union U_INST64_M30 {
- IA64_INST inst;
- struct { u64 qp:6, :7, imm:7, ar3:7,x4:4,x2:2,x3:3,s:1,major:4;};
-} INST64_M30;
-
-typedef union U_INST64_M31 {
- IA64_INST inst;
- struct { u64 qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4;};
-} INST64_M31;
-
-typedef union U_INST64_M32 {
- IA64_INST inst;
- struct { u64 qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4;};
-} INST64_M32;
-
-typedef union U_INST64_M33 {
- IA64_INST inst;
- struct { u64 qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M33;
-
-typedef union U_INST64_M35 {
- IA64_INST inst;
- struct { u64 qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-
-} INST64_M35;
-
-typedef union U_INST64_M36 {
- IA64_INST inst;
- struct { u64 qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
-} INST64_M36;
-
-typedef union U_INST64_M41 {
- IA64_INST inst;
- struct { u64 qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-} INST64_M41;
-
-typedef union U_INST64_M42 {
- IA64_INST inst;
- struct { u64 qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M42;
-
-typedef union U_INST64_M43 {
- IA64_INST inst;
- struct { u64 qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M43;
-
-typedef union U_INST64_M44 {
- IA64_INST inst;
- struct { u64 qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
-} INST64_M44;
-
-typedef union U_INST64_M45 {
- IA64_INST inst;
- struct { u64 qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M45;
-
-typedef union U_INST64_M46 {
- IA64_INST inst;
- struct { u64 qp:6, r1:7, un7:7, r3:7, x6:6, x3:3, un1:1, major:4; };
-} INST64_M46;
-
-typedef union U_INST64 {
- IA64_INST inst;
- struct { u64 :37, major:4; } generic;
- INST64_A5 A5; // used in build_hypercall_bundle only
- INST64_B4 B4; // used in build_hypercall_bundle only
- INST64_B8 B8; // rfi, bsw.[01]
- INST64_B9 B9; // break.b
- INST64_I19 I19; // used in build_hypercall_bundle only
- INST64_I26 I26; // mov register to ar (I unit)
- INST64_I27 I27; // mov immediate to ar (I unit)
- INST64_I28 I28; // mov from ar (I unit)
- INST64_M28 M28; // purge translation cache entry
- INST64_M29 M29; // mov register to ar (M unit)
- INST64_M30 M30; // mov immediate to ar (M unit)
- INST64_M31 M31; // mov from ar (M unit)
- INST64_M32 M32; // mov reg to cr
- INST64_M33 M33; // mov from cr
- INST64_M35 M35; // mov to psr
- INST64_M36 M36; // mov from psr
- INST64_M41 M41; // translation cache insert
- INST64_M42 M42; // mov to indirect reg/translation reg insert
- INST64_M43 M43; // mov from indirect reg
- INST64_M44 M44; // set/reset system mask
- INST64_M45 M45; // translation purge
- INST64_M46 M46; // translation access (tpa,tak)
-} INST64;
-
-#define MASK_41 ((u64)0x1ffffffffff)
-
-long priv_verbose = 0;
-#define verbose(a...) do { if (priv_verbose) printf(a); } while(0)
-
-/*
- * privify_inst
- *
- * Replaces privilege-sensitive instructions (and reads from write-trapping
- * registers) with privileged/trapping instructions as follows:
- * mov rx=ar.cflg -> mov ar.cflg=r(x+64) [**]
- * mov rx=ar.ky -> mov ar.ky=r(x+64)
- * fc rx -> ptc r(x+64)
- * thash rx=ry -> tak rx=r(y+64)
- * ttag rx=ry -> tpa rx=r(y+64)
- * mov rx=cpuid[ry] -> mov r(x+64)=rr[ry]
- * mov rx=pmd[ry] -> mov r(x+64)=pmc[ry] [**]
- * cover -> break.b 0x1fffff
- *
- * [**] not currently implemented
- */
-IA64_INST privify_inst(IA64_INST inst_val,
- IA64_SLOT_TYPE slot_type, IA64_BUNDLE *bp, char **msg)
-{
- INST64 inst = *(INST64 *)&inst_val;
-
- *msg = 0;
- switch (slot_type) {
- case M:
- // FIXME: Also use for mov_to/from_ar.cflag (M29/M30) (IA32 only)
- if (inst.generic.major != 1) break;
- if (inst.M46.x3 != 0) break;
- if (inst.M31.x6 == 0x22 && inst.M31.ar3 < 8) {
- // mov r1=kr -> mov kr=r1+64
- verbose("privify_inst: privified mov r1=kr @%p\n",bp);
- if (inst.M31.r1 >= 64) *msg = "mov r1=kr w/r1>63";
- else privify_mov_from_kr_m(inst);
- break;
- }
- if (inst.M29.x6 == 0x2a && inst.M29.ar3 < 8) {// mov kr=r1
- if (inst.M29.r2 >= 64) *msg = "mov kr=r2 w/r2>63";
- break;
- }
- if (inst.M28.x6 == 0x30) {
- // fc r3-> ptc r3+64
- verbose("privify_inst: privified fc r3 @%p\n",bp);
- if (inst.M28.r3 >= 64) *msg = "fc r3 w/r3>63";
- else privify_fc(inst);
- break;
- }
- if (inst.M28.x6 == 0x34) {
- if (inst.M28.r3 >= 64) *msg = "ptc.e w/r3>63";
- break;
- }
- if (inst.M46.un7 != 0) break;
- if (inst.M46.un1 != 0) break;
- if (inst.M46.x6 == 0x1a) { // thash -> tak r1=r3+64
- verbose("privify_inst: privified thash @%p\n",bp);
- if (inst.M46.r3 >= 64) *msg = "thash w/r3>63";
- else privify_thash(inst);
- }
- else if (inst.M46.x6 == 0x1b) { // ttag -> tpa r1=r3+64
- verbose("privify_inst: privified ttag @%p\n",bp);
- if (inst.M46.r3 >= 64) *msg = "ttag w/r3>63";
- else privify_ttag(inst);
- }
- else if (inst.M43.x6 == 0x17) {
- verbose("privify_inst: privified mov_from_cpuid @%p\n",bp);
- if (inst.M43.r1 >= 64) *msg = "mov_from_cpuid w/r1>63";
- else privify_mov_from_cpuid(inst);
- }
- else if (inst.M46.x6 == 0x1e) { // tpa
- if (inst.M46.r3 >= 64) *msg = "tpa w/r3>63";
- }
- else if (inst.M46.x6 == 0x1f) { // tak
- if (inst.M46.r3 >= 64) *msg = "tak w/r3>63";
- }
- else if (inst.M43.x6 == 0x10) {
- if (inst.M43.r1 >= 64) *msg = "mov_to_rr w/r1>63";
- }
- break;
- case B:
- if (inst.generic.major != 0) break;
- if (inst.B8.x6 == 0x2) { // cover -> break.b 0x1fffff
- if (inst.B8.un21 != 0) break;
- if (inst.B8.un4 != 0) break;
- privify_cover(inst);
- verbose("privify_inst: privified cover @%p\n",bp);
- }
- if (inst.B9.x6 == 0x0) { // (p15) break.b 0x1fffff -> cover
- if (inst.B9.qp != 15) break;
- if (inst.B9.imm20 != 0xfffff) break;
- if (inst.B9.i != 1) break;
- inst.B8.x6 = 0x2;
- inst.B8.un21 = 0;
- inst.B8.un4 = 0;
- inst.B8.qp = 0;
- verbose("privify_inst: unprivified pseudo-cover @%p\n",
- bp);
- }
- break;
- case I: // only used for privifying mov_from_ar
- // FIXME: Also use for mov_to/from_ar.cflag (I26/I27) (IA32 only)
- if (inst.generic.major != 0) break;
- if (inst.I28.x6 == 0x32 && !inst.I28.x3 && inst.I28.ar3 < 8) {
- // mov r1=kr -> mov kr=r1+64
- verbose("privify_inst: privified mov r1=kr @%p\n",bp);
- if (inst.I28.r1 >= 64) *msg = "mov r1=kr w/r1>63";
- else privify_mov_from_kr_i(inst);
- }
- else if (inst.I26.x6 == 0x2a && !inst.I26.x3 &&
- inst.I26.ar3 < 8) {// mov kr=r1
- if (inst.I26.r2 >= 64) *msg = "mov kr=r2 w/r2>63";
- }
- break;
- case F: case L: case ILLEGAL:
- break;
- }
- return *(IA64_INST *)&inst;
-}
-
-#define read_slot1(b) (((b.i64[0]>>46L) | (b.i64[1]<<18UL)) & MASK_41)
-// Not sure why, but this more obvious definition of read_slot1 doesn't work
-// because the compiler treats (b.slot1b<<18UL) as a signed 32-bit integer
-// so not enough bits get used and it gets sign extended to boot!
-//#define read_slot1(b) ((b.slot1a | (b.slot1b<<18UL)) & MASK_41)
-#define write_slot1(b,inst) do { b.slot1a=inst;b.slot1b=inst>>18UL;} while (0)
-
-
-void privify_memory(void *start, unsigned long len)
-{
- IA64_BUNDLE bundle, *bp = (IA64_BUNDLE *)start;
- IA64_INST tmp;
- char *msg;
-
-printf("privifying %ld bytes of memory at %p\n",len,start);
- if ((unsigned long)start & 0xfL) {
- printf("unaligned memory block in privify_memory\n");
- }
- len &= ~0xf;
- for (bundle = *bp; len; len -= 16) {
- switch(bundle.template) {
- case 0x06: case 0x07: case 0x14: case 0x15:
- case 0x1a: case 0x1b: case 0x1e: case 0x1f:
- break;
- case 0x16: case 0x17:
- // may be B in slot0/1 but cover can only be slot2
- bundle.slot2 = privify_inst(bundle.slot2,B,bp,&msg);
- break;
- case 0x00: case 0x01: case 0x02: case 0x03:
- tmp = privify_inst(read_slot1(bundle),I,bp,&msg);
- write_slot1(bundle,tmp);
- case 0x0c: case 0x0d:
- bundle.slot2 = privify_inst(bundle.slot2,I,bp,&msg);
- case 0x04: case 0x05:
- // could a privified cover be in slot2 here?
- bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
- break;
- case 0x08: case 0x09: case 0x0a: case 0x0b:
- bundle.slot2 = privify_inst(bundle.slot2,I,bp,&msg);
- case 0x0e: case 0x0f:
- bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
- if (msg) break;
- tmp = privify_inst(read_slot1(bundle),M,bp,&msg);
- write_slot1(bundle,tmp);
- break;
- case 0x10: case 0x11:
- tmp = privify_inst(read_slot1(bundle),I,bp,&msg);
- write_slot1(bundle,tmp);
- case 0x12: case 0x13:
- // may be B in slot1 but cover can only be slot2
- case 0x1c: case 0x1d:
- bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
- if (msg) break;
- bundle.slot2 = privify_inst(bundle.slot2,B,bp,&msg);
- break;
- case 0x18: case 0x19:
- bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
- if (msg) break;
- tmp = privify_inst(read_slot1(bundle),M,bp,&msg);
- write_slot1(bundle,tmp);
- if (msg) break;
- bundle.slot2 = privify_inst(bundle.slot2,B,bp,&msg);
- break;
- }
- if (msg) {
- if (bundle.slot2)
- printf("privify_memory: %s @%p\n",msg,bp);
- else
- printf("privify_memory: %s @%p probably not insts\n",
- msg,bp);
- printf("privify_memory: bundle=%p,%p\n",
- bundle.i64[1],bundle.i64[0]);
- }
- *bp = bundle;
- bundle = *++bp;
- }
-
-}
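
As a footnote to read_slot1/write_slot1 above, a freestanding sketch of the
same 41-bit slot extraction done on the raw 64-bit words (illustrative only;
the in-file comment explains why the bitfield form was avoided):

	#include <stdint.h>
	#include <stdio.h>

	/* An IA-64 bundle is 128 bits: a 5-bit template plus three 41-bit
	 * slots.  Slot 1 straddles the two 64-bit words (bits 46..63 of
	 * word 0 and bits 0..22 of word 1), hence the shift-and-or done on
	 * unsigned 64-bit values. */
	int main(void)
	{
		const uint64_t MASK_41 = 0x1ffffffffffULL;
		uint64_t i64[2] = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };

		uint64_t slot0 = (i64[0] >> 5) & MASK_41;
		uint64_t slot1 = ((i64[0] >> 46) | (i64[1] << 18)) & MASK_41;
		uint64_t slot2 = (i64[1] >> 23) & MASK_41;

		printf("slot0=%011llx slot1=%011llx slot2=%011llx\n",
		       (unsigned long long)slot0, (unsigned long long)slot1,
		       (unsigned long long)slot2);
		return 0;
	}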
diff --git a/xen/arch/ia64/tools/privify/privify.h b/xen/arch/ia64/tools/privify/privify.h
deleted file mode 100644
index 49291b3139..0000000000
--- a/xen/arch/ia64/tools/privify/privify.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Binary translate privilege-sensitive ops to privileged
- *
- * Copyright (C) 2004 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- */
-
-/*
- * Macros to replace privilege-sensitive instructions (and reads from
- * write-trapping registers) with privileged/trapping instructions as follows:
- * mov rx=ar.cflg -> mov ar.cflg=r(x+64) [**]
- * mov rx=ar.ky -> mov ar.ky=r(x+64)
- * fc rx -> ptc r(x+64)
- * thash rx=ry -> tak rx=r(y+64)
- * ttag rx=ry -> tpa rx=r(y+64)
- * mov rx=cpuid[ry] -> mov r(x+64)=rr[ry]
- * mov rx=pmd[ry] -> mov r(x+64)=pmc[ry] [**]
- * cover -> break.b 0x1fffff
- * [**] not implemented yet
- */
-
-#define notimpl(s) printk(s " not implemented");
-#define privify_mov_from_cflg_m(i) do { notimpl("mov from ar.cflg"); } while(0)
-#define privify_mov_from_cflg_i(i) do { notimpl("mov from ar.cflg"); } while(0)
-#define privify_mov_from_kr_m(i) do { i.M31.x6 = 0x2a; i.M29.r2 = i.M31.r1 + 64; } while(0)
-#define privify_mov_from_kr_i(i) do { i.I28.x6 = 0x2a; i.I26.r2 = i.I28.r1 + 64; } while(0)
-#define privify_fc(i) do { i.M28.x6 = 0x34; i.M28.r3 = i.M28.r3 + 64; } while(0)
-#define privify_thash(i) do { i.M46.x6 = 0x1f; i.M46.r3 += 64; } while(0)
-#define privify_ttag(i)	do { i.M46.x6 = 0x1e; i.M46.r3 += 64; } while(0) /* 0x1e = tpa */
-#define privify_mov_from_cpuid(i) do { i.M43.x6 = 0x10; i.M43.r1 += 64; } while(0)
-#define privify_mov_from_pmd(i) do { notimpl("mov from pmd"); } while(0)
-#define privify_cover(x) do { x.B8.x6 = 0x0; x.B9.imm20 = 0xfffff; x.B9.i = 0x1; } while(0)
-
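
To see the macro mechanics in isolation, here is a self-contained sketch that
applies privify_thash to a synthetic M46 instruction (types trimmed down from
privify.c; per the table there, thash is x6 0x1a and tak is x6 0x1f):

	#include <stdio.h>

	typedef unsigned long long u64;
	typedef union {
		u64 inst;
		struct { u64 qp:6, r1:7, un7:7, r3:7, x6:6, x3:3, un1:1, major:4; } M46;
	} INST64;

	#define privify_thash(i) do { i.M46.x6 = 0x1f; i.M46.r3 += 64; } while(0)

	int main(void)
	{
		INST64 i = { .inst = 0 };

		i.M46.x6 = 0x1a;	/* thash r1=r3 */
		i.M46.r3 = 5;
		privify_thash(i);	/* now tak r1=r(5+64), which traps */
		printf("x6=0x%llx r3=%llu\n", (u64)i.M46.x6, (u64)i.M46.r3);
		return 0;
	}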
diff --git a/xen/arch/ia64/tools/privify/privify_elf64.c b/xen/arch/ia64/tools/privify/privify_elf64.c
deleted file mode 100644
index 2fa9e49256..0000000000
--- a/xen/arch/ia64/tools/privify/privify_elf64.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Binary translate privilege-sensitive ops to privileged
- *
- * Copyright (C) 2004 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- */
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>	/* read(), write() */
-#define ELFSIZE 64
-#include <linux/elf.h>
-
-#define MAX_FILSIZ (32*1024*1024)
-unsigned long buf[MAX_FILSIZ/sizeof(unsigned long)];
-
-static void
-usage (FILE *fp)
-{
- fprintf(fp, "Usage: privify elf64filein elf64fileout\n");
-}
-
-static void
-panic (char *s)
-{
- fprintf(stderr, "panic: %s\n",s);
- exit(1);
-}
-
-static int
-read_file(const char *in_path, char *buf, int maxsize)
-{
- ssize_t nread, totread = 0, ssize_inc = 8192;
- int from;
-
- if ((from = open (in_path, O_RDONLY)) < 0) return -1;
- maxsize -= ssize_inc; // create safety zone
- if (maxsize < 0) panic("input file exceeds max size");
- while ((nread = read(from, buf, ssize_inc)) > 0) {
- if (nread < 0) return -1; // problem
- totread += nread;
- if (nread < ssize_inc) return totread; // done
- buf += ssize_inc;
- if (totread > maxsize) // buffer too small
- panic("file exceeds max size\n");
- }
- return totread;
-}
-
-static int
-write_file(const char *out_path, char *buf, int size)
-{
- int to;
-
- if ((to = open(out_path, O_WRONLY|O_CREAT|O_EXCL,0644)) < 0)
- return -1;
-
- if (write(to,buf,size) < 0) return -1;
-
- return 0;
-}
-
-#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
- (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
- (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
- (ehdr).e_ident[EI_MAG3] == ELFMAG3)
-
-
-static void
-privify_elf(char *elfbase)
-{
- Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfbase;
- Elf64_Phdr *phdr;
- Elf64_Shdr *shdr;
- char *elfaddr;
- unsigned long size;
- int h;
-
- if ( !IS_ELF(*ehdr) )
- panic("Kernel image does not have an ELF header.\n");
- for ( h = 0; h < ehdr->e_phnum; h++ ) {
- phdr = (Elf64_Phdr *)(elfbase +
- ehdr->e_phoff + (h*ehdr->e_phentsize));
- printf("h=%d, phdr=%p,phdr->p_type=%lx",h,phdr,phdr->p_type);
- if ((phdr->p_type != PT_LOAD)) {
- printf("\n");
- continue;
- }
- size = phdr->p_filesz;
- elfaddr = elfbase + phdr->p_offset;
- printf(",elfaddr=%p,size=%d,phdr->p_flags=%lx\n",
- elfaddr,size,phdr->p_flags);
- if (phdr->p_flags & PF_X) privify_memory(elfaddr,size);
- }
-}
-
-int
-main(int argc, char **argv)
-{
- char *in_path, *out_path;
- int fsize;
-
- if (argc != 3) {
- usage(stdout);
- exit(1);
- }
- in_path = argv[1];
- out_path = argv[2];
- if ((fsize = read_file(in_path,(char *)buf,MAX_FILSIZ)) < 0) {
- perror("read_file");
- panic("failed");
- }
- privify_elf((char *)buf);
- fflush(stdout);
- if (write_file(out_path,(char *)buf,fsize) < 0) {
- perror("write_file");
- panic("failed");
-	}
-	return 0;
-}
diff --git a/xen/arch/ia64/tools/privop/Makefile b/xen/arch/ia64/tools/privop/Makefile
deleted file mode 100644
index fbe5f0ad3e..0000000000
--- a/xen/arch/ia64/tools/privop/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-CC=gcc
-CFLAGS=-O -Wall
-
-.PHONY: all
-all: postat
-
-postat: postat.c pohcalls.o
-
-.PHONY: clean
-clean:
- $(RM) -f *.o postat *.s *~
-
-
diff --git a/xen/arch/ia64/tools/privop/pohcalls.S b/xen/arch/ia64/tools/privop/pohcalls.S
deleted file mode 100644
index d58b1277fe..0000000000
--- a/xen/arch/ia64/tools/privop/pohcalls.S
+++ /dev/null
@@ -1,30 +0,0 @@
- .file "hypercall.S"
- .pred.safe_across_calls p1-p5,p16-p63
- .text
- .align 16
- .global dump_privop_counts#
- .proc dump_privop_counts#
-dump_privop_counts:
- .prologue
- .body
- mov r2 = 0xffff
- ;;
- break 0x1000
- ;;
- br.ret.sptk.many b0
- ;;
- .endp dump_privop_counts#
- .align 16
- .global zero_privop_counts#
- .proc zero_privop_counts#
-zero_privop_counts:
- .prologue
- .body
- mov r2 = 0xfffe
- ;;
- break 0x1000
- ;;
- br.ret.sptk.many b0
- ;;
- .endp zero_privop_counts#
-
diff --git a/xen/arch/ia64/tools/privop/postat.c b/xen/arch/ia64/tools/privop/postat.c
deleted file mode 100644
index fc96e157b8..0000000000
--- a/xen/arch/ia64/tools/privop/postat.c
+++ /dev/null
@@ -1,27 +0,0 @@
-#include <stdio.h>
-#include <string.h>
-
-extern int dump_privop_counts (char *buf, int len);
-
-extern int zero_privop_counts (char *buf, int len);
-
-int
-main (int argc, char *argv[])
-{
- static char buf[8192];
- int res;
-
- if (argc == 1)
- res = dump_privop_counts (buf, sizeof (buf));
- else if (argc == 2 && strcmp (argv[1], "--clear") == 0)
- res = zero_privop_counts (buf, sizeof (buf));
- else
- {
- printf ("usage: %s [--clear]\n", argv[0]);
- return 1;
- }
- printf ("res=%d\n", res);
- fputs (buf, stdout);
-
- return 0;
-}
diff --git a/xen/arch/ia64/tools/sparse-merge b/xen/arch/ia64/tools/sparse-merge
deleted file mode 100755
index 83017bc4a7..0000000000
--- a/xen/arch/ia64/tools/sparse-merge
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/bin/bash
-# Generate a patch for each of the ia64 files in the linux-2.6-xen-sparse tree
-
-# Path to mercurial tree of upstream Linux
-# WARNING: This will do an 'hg up -C' on the upstream Linux tree, you
-# will lose data if there's anything there you care about.
-: ${LINUXPATH:=/tmp/linux-2.6}
-# Tag of current base upstream image for Xen files
-: ${OLDTAG:=v$(awk '/^LINUX_VER/{print $NF}' buildconfigs/mk.linux-2.6-xen)}
-# Tag of new upstream base to go to
-: ${NEWTAG:=v$(wget -O- -o/dev/null http://kernel.org/kdist/finger_banner \
- | awk '/latest stable/{print $NF}')}
-# Restrict merge to specific arch (set to . for all)
-: ${ARCH:=ia64}
-
-SPARSEDIR=linux-2.6-xen-sparse
-WD=$PWD
-
-if [ ! -d $SPARSEDIR ]; then
- echo "Can't find $SPARSEDIR directory."
- exit
-fi
-
-# Check for modified files in the sparse tree before starting
-if hg st $SPARSEDIR | head | grep .; then
- echo
- echo "$SPARSEDIR contains modifications, please clean it up first"
- exit
-fi
-
-# We want the linux upstream tree to be at the OLDTAG to get the OLDTAG-Xen diff.
-# Save current revision to restore when done
-cd $LINUXPATH || exit 1
-OLDCSET=$(hg parents | awk '/^changeset:/{print($2)}' | cut -f 1 -d :)
-for t in $OLDTAG $NEWTAG; do
- [[ $t == *.* ]] || continue
- if ! hg tags | cut -f1 -d' ' | grep -Fx $t; then
- echo "Tag $t not found, ketching up"
- if [[ $t == *-* ]]; then
- # rc/pre/git versions start at the previous stable release
- micro=${t%%-*}; micro=${micro##*.}
- stable=${t%%-*}; stable=${stable%.*}.$((micro-1))
- hg up -C $stable
- else
- hg up -C ${t%.*} || exit 1
- fi
- ketchup ${t#v} || exit 1
- hg addremove
- hg ci -m $t
- hg tag -l $t
- fi
-done
-hg up -C $OLDTAG || exit 1
-
-cd $WD
-for i in $(hg manifest | awk '{print($3)}' | grep $SPARSEDIR | grep "$ARCH"); do
- cd $WD
-
- FILENAME=$(basename $i)
- DIRNAME=$(dirname $i)
- DIFFPATH=$(echo $i | sed -e "s,^$SPARSEDIR,$LINUXPATH,")
-
- if [ ! -d $DIRNAME ]; then
- echo "Hmm, something bad happened parsing directory name: $i"
- continue
- fi
-
- if [ ! -e $DIFFPATH ]; then
- continue
- fi
-
- echo -n "$i ... "
-
- cd $DIRNAME
- XENDIR=$(pwd)
-
- ORIGPATH=$(echo $i | sed -e "s/^$SPARSEDIR/./")
- APATH=$(echo $i | sed -e "s/^$SPARSEDIR/a/")
- BPATH=$(echo $i | sed -e "s/^$SPARSEDIR/b/")
- cd $LINUXPATH
- hg diff -r $OLDTAG -r $NEWTAG $ORIGPATH | \
- sed -e "s,^--- $APATH,--- $FILENAME," \
- -e "s,^+++ $BPATH,+++ $FILENAME," \
- > $XENDIR/$FILENAME-$OLDTAG-$NEWTAG.diff
- cd $XENDIR
-
- # Do we have a diff file? Did anything change?
- if [ ! -s $FILENAME-$OLDTAG-$NEWTAG.diff ]; then
- echo "SUCCESS (Upstream unchanged)"
- continue
- fi
-
- if ! patch -f -i $FILENAME-$OLDTAG-$NEWTAG.diff > /dev/null 2>&1; then
- # It failed, how badly?
- if [ ! -e ${FILENAME}.rej ]; then
- echo "ERROR, Hmm, no .rej file, but diff failed, fix manually"
- continue
- fi
- TONEWREJ=$(wc -l ${FILENAME}.rej | \
- awk '{print($1)}')
- hg st $FILENAME | grep -q . && hg revert $FILENAME
- rm -f ${FILENAME}.rej ${FILENAME}.orig
- diff -uN $DIFFPATH $FILENAME | \
- sed -e "s,^--- $DIFFPATH,--- $FILENAME," \
- > $FILENAME-$OLDTAG-Xen.diff
-
- if [ ! -e $FILENAME-$OLDTAG-Xen.diff ]; then
- echo "ERROR, failed to create patch file"
- continue
- fi
-
- if ! patch -R -i $FILENAME-$OLDTAG-Xen.diff > /dev/null 2>&1; then
- echo "ERROR, reverting Xen changes failed"
- hg revert $FILENAME
- continue
- fi
-
- if ! patch -f -i $FILENAME-$OLDTAG-$NEWTAG.diff > /dev/null 2>&1; then
- echo "ERROR, new upstream patch failed on reverted file"
- hg revert $FILENAME
- continue
- fi
-
- if ! patch -f -i $FILENAME-$OLDTAG-Xen.diff > /dev/null 2>&1; then
- if [ ! -e ${FILENAME}.rej ]; then
- echo "ERROR, Hmm, no .rej file, but diff failed, fix manually"
- continue
- fi
- TOXENREJ=$(wc -l ${FILENAME}.rej | \
- awk '{print($1)}')
-
- if [ $TOXENREJ -gt $TONEWREJ ]; then
- hg revert $FILENAME
- rm -f ${FILENAME}.rej ${FILENAME}.orig
- patch -f -i $FILENAME-$OLDTAG-$NEWTAG.diff > /dev/null 2>&1
- echo "MANUAL MERGE REQUIRED (Upstream reject)"
- else
- echo "MANUAL MERGE REQUIRED (Xen reject)"
- fi
-
- else
- rm -f ${FILENAME}.rej ${FILENAME}.orig
- echo "SUCCESS (Re-applied Xen patch)"
- fi
- else
- rm -f ${FILENAME}.rej ${FILENAME}.orig
- echo "SUCCESS (Upstream applied)"
- fi
-done
-find $SPARSEDIR -name \*.diff -empty | xargs -r rm -f
-cd $LINUXPATH
-hg up -C $OLDCSET
diff --git a/xen/arch/ia64/tools/xelilo/elilo.README b/xen/arch/ia64/tools/xelilo/elilo.README
deleted file mode 100755
index 5b19538d27..0000000000
--- a/xen/arch/ia64/tools/xelilo/elilo.README
+++ /dev/null
@@ -1,20 +0,0 @@
-Elilo update for Xen/ia64 HowTo 2005/12/01
-
- Xen support is committed to ELILO CVS as of Dec 1, 2005. This support
- should be in version 3.5pre2 when it is made available. To build from
- source:
-
-1. Build the new elilo
- a. Get current elilo CVS from http://sourceforge.net/projects/elilo
- b. make (Note that gnu-efi is a build dependency)
-
-2. How to run with the new elilo.efi?
- a. Example to run
-      modify elilo.conf with the following entry
-
- image=XenoLinux.gz
- label=xen
- vmm=xen.gz
- initrd=initrd-2.6.9-5.7.EL.img
- read-only
- append="com2=57600,8n1 console=com2 -- nomca console=ttyS1,57600 console=tty0 root=/dev/sda3"
diff --git a/xen/arch/ia64/tools/xelilo/xlilo.efi b/xen/arch/ia64/tools/xelilo/xlilo.efi
deleted file mode 100755
index 4c9714e088..0000000000
--- a/xen/arch/ia64/tools/xelilo/xlilo.efi
+++ /dev/null
Binary files differ
diff --git a/xen/arch/ia64/vmx/Makefile b/xen/arch/ia64/vmx/Makefile
deleted file mode 100644
index 1e0ad124e5..0000000000
--- a/xen/arch/ia64/vmx/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-obj-y += viosapic.o
-#obj-y += mm.o
-obj-y += mmio.o
-obj-y += pal_emul.o
-obj-y += vlsapic.o
-obj-y += vmmu.o
-obj-y += vmx_entry.o
-obj-y += vmx_hypercall.o
-obj-y += vmx_init.o
-obj-y += vmx_interrupt.o
-obj-y += vmx_ivt.o
-obj-y += vmx_phy_mode.o
-obj-y += vmx_fault.o
-obj-y += vmx_support.o
-obj-y += vmx_utility.o
-obj-y += vmx_vcpu.o
-obj-y += vmx_virt.o
-obj-y += vmx_vsa.o
-obj-y += vtlb.o
-obj-y += optvfault.o
-obj-y += vacpi.o
-obj-y += vmx_vcpu_save.o
-obj-y += save.o
-obj-y += sioemu.o
diff --git a/xen/arch/ia64/vmx/mmio.c b/xen/arch/ia64/vmx/mmio.c
deleted file mode 100644
index b26b96b458..0000000000
--- a/xen/arch/ia64/vmx/mmio.c
+++ /dev/null
@@ -1,560 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * mmio.c: MMIO emulation components.
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
- */
-
-#include <linux/sched.h>
-#include <xen/mm.h>
-#include <asm/vmx_mm_def.h>
-#include <asm/gcc_intrin.h>
-#include <linux/interrupt.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/bundle.h>
-#include <asm/types.h>
-#include <public/hvm/ioreq.h>
-#include <asm/vmx.h>
-#include <public/event_channel.h>
-#include <public/xen.h>
-#include <linux/event.h>
-#include <xen/domain.h>
-#include <asm/viosapic.h>
-#include <asm/vlsapic.h>
-#include <asm/hvm/vacpi.h>
-#include <asm/hvm/support.h>
-#include <public/hvm/save.h>
-#include <public/arch-ia64/hvm/memmap.h>
-#include <public/arch-ia64/sioemu.h>
-#include <asm/sioemu.h>
-
-#define HVM_BUFFERED_IO_RANGE_NR 1
-
-struct hvm_buffered_io_range {
- unsigned long start_addr;
- unsigned long length;
-};
-
-static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
-static struct hvm_buffered_io_range
-*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
-{
- &buffered_stdvga_range
-};
-
-static int hvm_buffered_io_intercept(ioreq_t *p)
-{
- struct vcpu *v = current;
- buffered_iopage_t *pg =
- (buffered_iopage_t *)(v->domain->arch.hvm_domain.buf_ioreq.va);
- buf_ioreq_t bp;
- int i, qw = 0;
-
- /* Ensure buffered_iopage fits in a page */
- BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
-
- /* ignore READ ioreq_t and anything buffered io can't deal with */
- if (p->dir == IOREQ_READ || p->addr > 0xFFFFFUL ||
- p->data_is_ptr || p->count != 1)
- return 0;
-
- for (i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++) {
- if (p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
- p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
- hvm_buffered_io_ranges[i]->length)
- break;
- }
-
- if (i == HVM_BUFFERED_IO_RANGE_NR)
- return 0;
-
- bp.type = p->type;
- bp.dir = p->dir;
- switch (p->size) {
- case 1:
- bp.size = 0;
- break;
- case 2:
- bp.size = 1;
- break;
- case 4:
- bp.size = 2;
- break;
- case 8:
- bp.size = 3;
- qw = 1;
- break;
- default:
- gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
- return 0;
- }
- bp.data = p->data;
- bp.addr = p->addr;
-
- spin_lock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
-
- if (pg->write_pointer - pg->read_pointer >= IOREQ_BUFFER_SLOT_NUM - qw) {
-        /* The queue is full:
-         * send the iopacket through the normal path.
-         * NOTE: unsigned arithmetic handles write_pointer
-         * wraparound correctly.
-         */
- spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
- return 0;
- }
-
- memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
- &bp, sizeof(bp));
-
- if (qw) {
- bp.data = p->data >> 32;
- memcpy(&pg->buf_ioreq[(pg->write_pointer + 1) % IOREQ_BUFFER_SLOT_NUM],
- &bp, sizeof(bp));
- }
-
- /* Make the ioreq_t visible before write_pointer */
- wmb();
- pg->write_pointer += qw ? 2 : 1;
-
- spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
-
- return 1;
-}
-
-static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
-{
- struct vcpu *v = current;
- ioreq_t *p = get_vio(v);
-
- p->addr = pa;
- p->size = s;
- p->count = 1;
- if (dir == IOREQ_WRITE)
- p->data = *val;
- else
- p->data = 0;
- p->data_is_ptr = 0;
- p->dir = dir;
- p->df = 0;
- p->type = 1;
-
- if (hvm_buffered_io_intercept(p)) {
- p->state = STATE_IORESP_READY;
- vmx_io_assist(v);
- if (dir != IOREQ_READ)
- return;
- }
-
- vmx_send_assist_req(v);
- if (dir == IOREQ_READ)
- *val = p->data;
-
- return;
-}
-
-static int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
-{
- struct buffered_piopage *pio_page =
- (void *)(current->domain->arch.hvm_domain.buf_pioreq.va);
- spinlock_t *pio_lock;
- struct pio_buffer *piobuf;
- uint32_t pointer, page_offset;
-
- if (p->addr == 0x1F0)
- piobuf = &pio_page->pio[PIO_BUFFER_IDE_PRIMARY];
- else if (p->addr == 0x170)
- piobuf = &pio_page->pio[PIO_BUFFER_IDE_SECONDARY];
- else
- return 0;
-
- if (p->size != 2 && p->size != 4)
- return 0;
-
- pio_lock = &current->domain->arch.hvm_domain.buf_pioreq.lock;
- spin_lock(pio_lock);
-
- pointer = piobuf->pointer;
- page_offset = piobuf->page_offset;
-
- /* sanity check */
- if (page_offset + pointer < offsetof(struct buffered_piopage, buffer))
- goto unlock_out;
- if (page_offset + piobuf->data_end > PAGE_SIZE)
- goto unlock_out;
-
- if (pointer + p->size < piobuf->data_end) {
- uint8_t *bufp = (uint8_t *)pio_page + page_offset + pointer;
- if (p->dir == IOREQ_WRITE) {
- if (likely(p->size == 4 && (((long)bufp & 3) == 0)))
- *(uint32_t *)bufp = *val;
- else
- memcpy(bufp, val, p->size);
- } else {
- if (likely(p->size == 4 && (((long)bufp & 3) == 0))) {
- *val = *(uint32_t *)bufp;
- } else {
- *val = 0;
- memcpy(val, bufp, p->size);
- }
- }
- piobuf->pointer += p->size;
- spin_unlock(pio_lock);
-
- p->state = STATE_IORESP_READY;
- vmx_io_assist(current);
- return 1;
- }
-
- unlock_out:
- spin_unlock(pio_lock);
- return 0;
-}
-
-#define TO_LEGACY_IO(pa) (((pa)>>12<<2)|((pa)&0x3))
-
-static void __vmx_identity_mapping_save(int on,
- const struct identity_mapping* im,
- struct hvm_hw_ia64_identity_mapping *im_save)
-{
- im_save->on = !!on;
- if (!on) {
- im_save->pgprot = 0;
- im_save->key = 0;
- } else {
- im_save->pgprot = im->pgprot;
- im_save->key = im->key;
- }
-}
-
-static int vmx_identity_mappings_save(struct domain *d,
- hvm_domain_context_t *h)
-{
- const struct opt_feature *optf = &d->arch.opt_feature;
- struct hvm_hw_ia64_identity_mappings im_save;
-
- __vmx_identity_mapping_save(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4_FLG,
- &optf->im_reg4, &im_save.im_reg4);
- __vmx_identity_mapping_save(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5_FLG,
- &optf->im_reg5, &im_save.im_reg5);
- __vmx_identity_mapping_save(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG7_FLG,
- &optf->im_reg7, &im_save.im_reg7);
-
- return hvm_save_entry(OPT_FEATURE_IDENTITY_MAPPINGS, 0, h, &im_save);
-}
-
-static int __vmx_identity_mapping_load(struct domain *d, unsigned long cmd,
- const struct hvm_hw_ia64_identity_mapping *im_load)
-{
- struct xen_ia64_opt_feature optf;
-
- optf.cmd = cmd;
- optf.on = im_load->on;
- optf.pgprot = im_load->pgprot;
- optf.key = im_load->key;
-
- return domain_opt_feature(d, &optf);
-}
-
-static int vmx_identity_mappings_load(struct domain *d,
- hvm_domain_context_t *h)
-{
- struct hvm_hw_ia64_identity_mappings im_load;
- int rc;
-
- if (hvm_load_entry(OPT_FEATURE_IDENTITY_MAPPINGS, h, &im_load))
- return -EINVAL;
-
- rc = __vmx_identity_mapping_load(d, XEN_IA64_OPTF_IDENT_MAP_REG4,
- &im_load.im_reg4);
- if (rc)
- return rc;
- rc = __vmx_identity_mapping_load(d, XEN_IA64_OPTF_IDENT_MAP_REG5,
- &im_load.im_reg5);
- if (rc)
- return rc;
- rc = __vmx_identity_mapping_load(d, XEN_IA64_OPTF_IDENT_MAP_REG7,
- &im_load.im_reg7);
-
- return rc;
-}
-
-HVM_REGISTER_SAVE_RESTORE(OPT_FEATURE_IDENTITY_MAPPINGS,
- vmx_identity_mappings_save,
- vmx_identity_mappings_load,
- 1, HVMSR_PER_DOM);
-
-static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
-{
- struct vcpu *v = current;
- ioreq_t *p = get_vio(v);
-
- p->addr = TO_LEGACY_IO(pa & 0x3ffffffUL);
- p->size = s;
- p->count = 1;
- p->dir = dir;
- if (dir == IOREQ_WRITE)
- p->data = *val;
- else
- p->data = 0;
- p->data_is_ptr = 0;
- p->type = 0;
- p->df = 0;
-
- if (vmx_ide_pio_intercept(p, val))
- return;
-
- if (IS_ACPI_ADDR(p->addr) && vacpi_intercept(p, val))
- return;
-
- vmx_send_assist_req(v);
- if (dir == IOREQ_READ) { // read
- *val=p->data;
- }
-#ifdef DEBUG_PCI
-    if (dir == IOREQ_WRITE && p->addr == 0xcf8UL)
-        printk("Write 0xcf8, with val [0x%lx]\n", p->data);
-    else if (dir == IOREQ_READ && p->addr == 0xcfcUL)
-        printk("Read 0xcfc, with val [0x%lx]\n", p->data);
-#endif //DEBUG_PCI
- return;
-}
-
-static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir, u64 iot)
-{
- perfc_incra(vmx_mmio_access, iot & 0x7);
- switch (iot) {
- case GPFN_PIB:
- if (ma != 4)
- panic_domain(NULL, "Access PIB not with UC attribute\n");
-
- if (!dir)
- vlsapic_write(vcpu, src_pa, s, *dest);
- else
- *dest = vlsapic_read(vcpu, src_pa, s);
- break;
- case GPFN_IOSAPIC:
- if (!dir)
- viosapic_write(vcpu, src_pa, s, *dest);
- else
- *dest = viosapic_read(vcpu, src_pa, s);
- break;
- case GPFN_FRAME_BUFFER:
- case GPFN_LOW_MMIO:
- low_mmio_access(vcpu, src_pa, dest, s, dir);
- break;
- case GPFN_LEGACY_IO:
- legacy_io_access(vcpu, src_pa, dest, s, dir);
- break;
- default:
- panic_domain(NULL,"Bad I/O access\n");
- break;
- }
- return;
-}
-
-enum inst_type_en { SL_INTEGER, SL_FLOATING, SL_FLOATING_FP8 };
-
-/*
- * dir: 1 = read, 0 = write
- */
-void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma, u64 iot)
-{
- REGS *regs;
- IA64_BUNDLE bundle;
- int slot, dir=0;
- enum inst_type_en inst_type;
- size_t size;
- u64 data, data1, temp, update_reg;
- s32 imm;
- INST64 inst;
- unsigned long update_word;
-
- regs = vcpu_regs(vcpu);
- if (IA64_RETRY == __vmx_get_domain_bundle(regs->cr_iip, &bundle)) {
-        /* if the instruction fetch fails, return and retry */
- return;
- }
- slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
- if (!slot)
- inst.inst = bundle.slot0;
- else if (slot == 1) {
- u64 slot1b = bundle.slot1b;
- inst.inst = bundle.slot1a + (slot1b << 18);
- }
- else if (slot == 2)
- inst.inst = bundle.slot2;
-
-
- // Integer Load/Store
- if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
- inst_type = SL_INTEGER;
- size = (inst.M1.x6 & 0x3);
- if ((inst.M1.x6 >> 2) > 0xb) {
- dir = IOREQ_WRITE;
- vcpu_get_gr_nat(vcpu, inst.M4.r2, &data);
- } else if ((inst.M1.x6 >> 2) < 0xb) {
- dir = IOREQ_READ;
- }
- }
- // Integer Load + Reg update
- else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
- inst_type = SL_INTEGER;
- dir = IOREQ_READ;
- size = (inst.M2.x6 & 0x3);
- vcpu_get_gr_nat(vcpu, inst.M2.r3, &temp);
- vcpu_get_gr_nat(vcpu, inst.M2.r2, &update_reg);
- temp += update_reg;
- vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
- }
- // Integer Load/Store + Imm update
- else if (inst.M3.major == 5) {
- inst_type = SL_INTEGER;
- size = (inst.M3.x6 & 0x3);
- if ((inst.M5.x6 >> 2) > 0xb) {
- dir = IOREQ_WRITE;
- vcpu_get_gr_nat(vcpu, inst.M5.r2, &data);
- vcpu_get_gr_nat(vcpu, inst.M5.r3, &temp);
- imm = (inst.M5.s << 31) | (inst.M5.i << 30) | (inst.M5.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
- } else if ((inst.M3.x6 >> 2) < 0xb) {
- dir = IOREQ_READ;
- vcpu_get_gr_nat(vcpu, inst.M3.r3, &temp);
- imm = (inst.M3.s << 31) | (inst.M3.i << 30) | (inst.M3.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
- }
- }
- // Floating-point spill
- else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B &&
- inst.M9.m == 0 && inst.M9.x == 0) {
- struct ia64_fpreg v;
-
- inst_type = SL_FLOATING;
- dir = IOREQ_WRITE;
- vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
- data1 = v.u.bits[1] & 0x3ffff;
- data = v.u.bits[0];
- size = 4;
- }
- // Floating-point spill + Imm update
- else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
- struct ia64_fpreg v;
-
- inst_type = SL_FLOATING;
- dir = IOREQ_WRITE;
- vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
- vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
- imm = (inst.M10.s << 31) | (inst.M10.i << 30) | (inst.M10.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
- data1 = v.u.bits[1] & 0x3ffff;
- data = v.u.bits[0];
- size = 4;
- }
- // Floating-point stf8 + Imm update
- else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
- struct ia64_fpreg v;
-
- inst_type = SL_FLOATING;
- dir = IOREQ_WRITE;
- size = 3;
- vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
- data = v.u.bits[0]; /* Significand. */
- vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
- imm = (inst.M10.s << 31) | (inst.M10.i << 30) | (inst.M10.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
- }
- // lfetch - do not perform accesses.
- else if (inst.M15.major== 7 && inst.M15.x6 >=0x2c && inst.M15.x6 <= 0x2f) {
- vcpu_get_gr_nat(vcpu, inst.M15.r3, &temp);
- imm = (inst.M15.s << 31) | (inst.M15.i << 30) | (inst.M15.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
-
- vcpu_increment_iip(vcpu);
- return;
- }
- // Floating-point Load Pair + Imm ldfp8 M12
- else if (inst.M12.major == 6 && inst.M12.m == 1
- && inst.M12.x == 1 && inst.M12.x6 == 1) {
- inst_type = SL_FLOATING_FP8;
- dir = IOREQ_READ;
- size = 4; // 16 bytes: split into two 8-byte accesses below
- vcpu_set_gr(vcpu,inst.M12.r3,padr + 16, 0);
- }
- else {
- panic_domain
- (NULL, "This memory access instr can't be emulated: %lx pc=%lx\n",
- inst.inst, regs->cr_iip);
- }
-
- update_word = size | (dir << 7) | (ma << 8) | (inst_type << 12);
- if (dir == IOREQ_READ) {
- if (inst_type == SL_INTEGER)
- update_word |= (inst.M1.r1 << 16);
- else if (inst_type == SL_FLOATING_FP8)
- update_word |= (inst.M12.f1 << 16) | (inst.M12.f2 << 24);
- }
-
- if (vcpu->domain->arch.is_sioemu) {
- if (iot != GPFN_PIB && iot != GPFN_IOSAPIC) {
- sioemu_io_emulate(padr, data, data1, update_word);
- return;
- }
- }
-
- if (size == 4) {
- mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir, iot);
- size = 3;
- }
- mmio_access(vcpu, padr, &data, 1 << size, ma, dir, iot);
-
- emulate_io_update(vcpu, update_word, data, data1);
-}
-
-void
-emulate_io_update(VCPU *vcpu, u64 word, u64 data, u64 data1)
-{
- int dir = (word >> 7) & 1;
-
- if (dir == IOREQ_READ) {
- int r1 = (word >> 16) & 0xff;
- int r2 = (word >> 24) & 0xff;
- enum inst_type_en inst_type = (word >> 12) & 0x0f;
-
- if (inst_type == SL_INTEGER) {
- vcpu_set_gr(vcpu, r1, data, 0);
- } else if (inst_type == SL_FLOATING_FP8) {
- struct ia64_fpreg v;
-
- v.u.bits[0] = data;
- v.u.bits[1] = 0x1003E;
- vcpu_set_fpreg(vcpu, r1, &v);
- v.u.bits[0] = data1;
- v.u.bits[1] = 0x1003E;
- vcpu_set_fpreg(vcpu, r2, &v);
- } else {
- panic_domain(NULL, "Don't support ldfd now !");
- }
- }
- vcpu_increment_iip(vcpu);
-}
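
For readers tracing the emulation path: emulate_io_inst() packs everything that emulate_io_update() (and the sioemu callback path) later needs into a single word. A minimal decoding sketch, inferred from the shifts above; the struct and helper names are illustrative, not part of the original source:

    /* Sketch only: field layout of the packed update word.  Bit positions
     * come from the shifts in emulate_io_inst()/emulate_io_update(). */
    struct io_update {
        unsigned size;       /* bits 0-6:   log2 of the access size         */
        unsigned dir;        /* bit  7:     IOREQ_READ or IOREQ_WRITE       */
        unsigned ma;         /* bits 8-11:  memory attribute                */
        unsigned inst_type;  /* bits 12-15: SL_INTEGER/.../SL_FLOATING_FP8  */
        unsigned r1;         /* bits 16-23: destination GR, or first FR     */
        unsigned r2;         /* bits 24-31: second FR (ldfp8 only)          */
    };

    static struct io_update unpack_io_update(unsigned long word)
    {
        struct io_update u = {
            .size      = word & 0x7f,
            .dir       = (word >> 7) & 1,
            .ma        = (word >> 8) & 0xf,
            .inst_type = (word >> 12) & 0xf,
            .r1        = (word >> 16) & 0xff,
            .r2        = (word >> 24) & 0xff,
        };
        return u;
    }
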
diff --git a/xen/arch/ia64/vmx/optvfault.S b/xen/arch/ia64/vmx/optvfault.S
deleted file mode 100644
index 896ea31be9..0000000000
--- a/xen/arch/ia64/vmx/optvfault.S
+++ /dev/null
@@ -1,1184 +0,0 @@
-/*
- * arch/ia64/vmx/optvfault.S
- * optimized virtualization fault handler
- *
- * Copyright (C) 2006 Intel Corp.
- * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
- */
-
-#include <asm/pgtable.h>
-#include <asm/asmmacro.h>
-#include <asm/kregs.h>
-#include <asm/offsets.h>
-#include <asm/percpu.h>
-#include <asm/processor.h>
-#include <asm/vmx_vpd.h>
-#include <asm/vmx_pal_vsa.h>
-#include <asm/asm-offsets.h>
-#include <asm/virt_event.h>
-#include <asm-ia64/vmx_mm_def.h>
-#include <asm-ia64/vmx_phy_mode.h>
-#include "entry.h"
-
-// r21 : current
-// r23 : b0
-// r31 : pr
-
-#define VMX_VIRT_SAVE \
- mov r27=ar.rsc; /* M */ \
- ;; \
- cover; /* B;; (or nothing) */ \
- ;; \
- /* switch from user to kernel RBS: */ \
- invala; /* M */ \
- ;; \
- mov ar.rsc=0; /* set enforced lazy mode */ \
- ;; \
- mov.m r26=ar.rnat; \
- movl r28=IA64_RBS_OFFSET; /* compute base of RBS */ \
- ;; \
- mov r22=ar.bspstore; /* save ar.bspstore */ \
- add r28=r28,r21; \
- ;; \
- mov ar.bspstore=r28; /* switch to kernel RBS */ \
- ;; \
- mov r18=ar.bsp; \
- mov ar.rsc=0x3; /* set eager mode */ \
- ;; \
- alloc r32=ar.pfs,24,0,3,0 /* save pfs */ \
- ;; \
- sub r18=r18,r28; /* r18=RSE.ndirty*8 */ \
- ;; \
- shl r33=r18,16; /* save loadrs */ \
- mov r35=b6; /* save b6 */ \
- mov r36=b7; /* save b7 */ \
- mov r37=ar.csd; /* save ar.csd */ \
- mov r38=ar.ssd; /* save ar.ssd */ \
- mov r39=r8; /* save r8 */ \
- mov r40=r9; /* save r9 */ \
- mov r41=r10; /* save r10 */ \
- mov r42=r11; /* save r11 */ \
- mov r43=r27; /* save ar.rsc */ \
- mov r44=r26; /* save ar.rnat */ \
- mov r45=r22; /* save ar.bspstore */ \
- mov r46=r31; /* save pr */ \
- mov r47=r23; /* save b0 */ \
- mov r48=r1; /* save r1 */ \
- mov r49=r12; /* save r12 */ \
- mov r50=r13; /* save r13 */ \
- mov r51=r15; /* save r15 */ \
- mov r52=r14; /* save r14 */ \
- mov r53=r2; /* save r2 */ \
- mov r54=r3; /* save r3 */ \
- mov r34=ar.ccv; /* save ar.ccv */ \
- ;; \
- movl r1=__gp; \
- movl r29=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16; \
- ;; \
- add r12=r29,r21; /* compute base of memory stack */ \
- mov r13=r21; \
- ;; \
-{ .mii; /* call vps sync read */ \
- add r25=IA64_VPD_BASE_OFFSET, r21; \
- nop 0x0; \
- mov r24=ip; \
- ;; \
-}; \
-{ .mmb; \
- add r24 = 0x20, r24; \
- ld8 r25=[r25]; /* read vpd base */ \
- br.cond.sptk vmx_vps_sync_read; /* call the service */ \
- ;; \
-};
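
A note on the loadrs value stashed in r33 by VMX_VIRT_SAVE: after the RBS switch, ar.bsp minus the kernel ar.bspstore equals RSE.ndirty * 8, and RSC.loadrs occupies bits 16-29 of ar.rsc, hence the shift by 16. A hedged C sketch of the same computation (the helper name is illustrative):

    /* Sketch only: the value later written to ar.rsc before "loadrs"
     * in ia64_leave_hypervisor_virt. */
    static inline unsigned long make_loadrs(unsigned long bsp,
                                            unsigned long kernel_bspstore)
    {
        unsigned long ndirty_bytes = bsp - kernel_bspstore; /* ndirty * 8 */
        return ndirty_bytes << 16;  /* RSC.loadrs is bits 16-29 of ar.rsc */
    }
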
-
-
-ENTRY(ia64_leave_hypervisor_virt)
- invala /* M */
- ;;
- mov r21=r13 /* get current */
- mov b6=r35 /* restore b6 */
- mov b7=r36 /* restore b7 */
- mov ar.csd=r37 /* restore ar.csd */
- mov ar.ssd=r38 /* restore ar.ssd */
- mov r8=r39 /* restore r8 */
- mov r9=r40 /* restore r9 */
- mov r10=r41 /* restore r10 */
- mov r11=r42 /* restore r11 */
- mov ar.pfs=r32 /* restore ar.pfs */
- mov r27=r43 /* restore ar.rsc */
- mov r26=r44 /* restore ar.rnat */
- mov r25=r45 /* restore ar.bspstore */
- mov r23=r46 /* restore predicates */
- mov r22=r47 /* restore b0 */
- mov r1=r48 /* restore r1 */
- mov r12=r49 /* restore r12 */
- mov r13=r50 /* restore r13 */
- mov r15=r51 /* restore r15 */
- mov r14=r52 /* restore r14 */
- mov r2=r53 /* restore r2 */
- mov r3=r54 /* restore r3 */
- mov ar.ccv=r34 /* restore ar.ccv */
- mov ar.rsc=r33 /* load ar.rsc to be used for "loadrs" */
- ;;
- alloc r16=ar.pfs,0,0,0,0 /* drop current register frame */
- ;;
- loadrs
- ;;
- mov ar.bspstore=r25
- ;;
- mov ar.rnat=r26
- ;;
- mov ar.rsc=r27
- adds r18=IA64_VPD_BASE_OFFSET,r21
- ;;
- ld8 r25=[r18] // load vpd
- mov r17=r0
- ;;
-//vsa_sync_write_start
- ;;
- movl r24=ia64_leave_hypervisor_virt_1 // calculate return address
- br.cond.sptk vmx_vps_sync_write // call the service
- ;;
-ia64_leave_hypervisor_virt_1:
- mov r24=r22
- mov r31=r23
- br.cond.sptk vmx_resume_to_guest
-END(ia64_leave_hypervisor_virt)
-
-
-
-// Inputs are: r21 (= current), r24 (= cause), r25 (= insn), r31 (=saved pr)
-
-#define BACK_TO_SLOW_PATH \
-{; \
- nop.m 0x0; \
- mov b0=r23; \
- br.many vmx_virtualization_fault_back; \
-}; \
-
-GLOBAL_ENTRY(virtualization_fault_table)
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
-{ /* Entry 3 */
- cmp.eq p2,p0=r0,r0
- mov b0=r23
- br.many vmx_asm_mov_from_ar
-}
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
-{ /* Entry 6 */
- cmp.eq p2,p0=r0,r0
- mov b0=r23
- br.many vmx_asm_mov_to_psr
-}
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
-{ /* Entry 10 */
- cmp.eq p2,p0=r0,r0
- mov b0=r23
- br.many vmx_asm_mov_to_rr
-}
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
-{ /* Entry 18 */
- cmp.eq p2,p0=r0,r0
- mov b0=r23
- br.many vmx_asm_mov_from_rr
-}
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
-{ /* Entry 24 */
- cmp.eq p2,p0=r0,r0
- mov b0=r23
- br.many vmx_asm_ssm
-}
-{ /* Entry 25 */
- cmp.eq p2,p0=r0,r0
- mov b0=r23
- br.many vmx_asm_rsm
-}
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
-{ /* Entry 31 */
- cmp.eq p2,p0=r0,r0
- mov b0=r23
- br.many vmx_asm_thash
-}
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
-{ /* Entry 37 */
- cmp.ne p2,p0=r0,r0
- mov b0=r23
- br.many vmx_asm_rfi
-}
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
- BACK_TO_SLOW_PATH
-END(virtualization_fault_table)
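
The table above relies on every entry being exactly one 16-byte bundle, so the fast path can branch to table_base + cause * 16 without any lookup. A hedged sketch of the implied dispatch (names are illustrative; the actual branch is taken in the virtualization-fault vector, which is not part of this hunk):

    /* Sketch only: each table entry is a single 16-byte IA-64 bundle. */
    #define VFAULT_ENTRY_SIZE 16

    static inline unsigned long vfault_entry(unsigned long table_base,
                                             unsigned long cause)
    {
        return table_base + cause * VFAULT_ENTRY_SIZE;  /* cause is in r24 */
    }
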
-
-
-ENTRY(vmx_dummy_function)
- br.sptk.many vmx_dummy_function
-END(vmx_dummy_function)
-
-/*
- * Inputs:
- * r24 : return address
- * r25 : vpd
- * r29 : scratch
- *
- */
-GLOBAL_ENTRY(vmx_vps_sync_read)
- movl r29 = vmx_dummy_function
- ;;
- mov b0=r29
- br.sptk.many b0
-END(vmx_vps_sync_read)
-
-/*
- * Inputs:
- * r24 : return address
- * r25 : vpd
- * r29 : scratch
- */
-GLOBAL_ENTRY(vmx_vps_sync_write)
- movl r29 = vmx_dummy_function
- ;;
- mov b0=r29
- br.sptk.many b0
-END(vmx_vps_sync_write)
-
-/*
- * Inputs:
- * r23 : pr
- * r24 : guest b0
- * r25 : vpd
- */
-GLOBAL_ENTRY(vmx_vps_resume_normal)
- movl r29 = vmx_dummy_function
- ;;
- mov b0=r29
- mov pr=r23,-2
- br.sptk.many b0
-END(vmx_vps_resume_normal)
-
-/*
- * Inputs:
- * r23 : pr
- * r24 : guest b0
- * r25 : vpd
- * r17 : isr
- */
-GLOBAL_ENTRY(vmx_vps_resume_handler)
- movl r29 = vmx_dummy_function
- ;;
- ld8 r26=[r25]
- shr r17=r17,IA64_ISR_IR_BIT
- ;;
- dep r26=r17,r26,63,1 // bit 63 of r26 indicates whether to enable CFLE
- mov b0=r29
- mov pr=r23,-2
- br.sptk.many b0
-END(vmx_vps_resume_handler)
-
-//r13 ->vcpu
-//call with psr.bn = 0
-GLOBAL_ENTRY(vmx_asm_bsw0)
- mov r15=ar.unat
- ;;
- adds r14=IA64_VPD_BASE_OFFSET,r13
- ;;
- ld8 r14=[r14]
- bsw.1
- ;;
- adds r2=IA64_VPD_VB1REG_OFFSET, r14
- adds r3=IA64_VPD_VB1REG_OFFSET+8, r14
- ;;
- .mem.offset 0,0; st8.spill [r2]=r16,16
- .mem.offset 8,0; st8.spill [r3]=r17,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r18,16
- .mem.offset 8,0; st8.spill [r3]=r19,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r20,16
- .mem.offset 8,0; st8.spill [r3]=r21,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r22,16
- .mem.offset 8,0; st8.spill [r3]=r23,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r24,16
- .mem.offset 8,0; st8.spill [r3]=r25,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r26,16
- .mem.offset 8,0; st8.spill [r3]=r27,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r28,16
- .mem.offset 8,0; st8.spill [r3]=r29,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r30,16
- .mem.offset 8,0; st8.spill [r3]=r31,16
- ;;
- mov r9=ar.unat
- adds r8=IA64_VPD_VB1NAT_OFFSET, r14
- ;;
- st8 [r8]=r9
- adds r8=IA64_VPD_VB0NAT_OFFSET, r14
- ;;
- ld8 r9=[r8]
- adds r2= IA64_VPD_VB0REG_OFFSET, r14
- adds r3= IA64_VPD_VB0REG_OFFSET+8, r14
- ;;
- mov ar.unat=r9
- ;;
- ld8.fill r16=[r2],16
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- ;;
- ld8.fill r22=[r2],16
- ld8.fill r23=[r3],16
- ;;
- ld8.fill r24=[r2],16
- ld8.fill r25=[r3],16
- ;;
- ld8.fill r26=[r2],16
- ld8.fill r27=[r3],16
- ;;
- ld8.fill r28=[r2],16
- ld8.fill r29=[r3],16
- ;;
- ld8.fill r30=[r2],16
- ld8.fill r31=[r3],16
- ;;
- mov ar.unat=r15
- ;;
- bsw.0
- ;;
- br.ret.sptk.many b0
-END(vmx_asm_bsw0)
-
-//r13 ->vcpu
-//call with psr.bn = 0
-GLOBAL_ENTRY(vmx_asm_bsw1)
- mov r15=ar.unat
- ;;
- adds r14=IA64_VPD_BASE_OFFSET,r13
- ;;
- ld8 r14=[r14]
- bsw.1
- ;;
- adds r2=IA64_VPD_VB0REG_OFFSET, r14
- adds r3=IA64_VPD_VB0REG_OFFSET+8, r14
- ;;
- .mem.offset 0,0; st8.spill [r2]=r16,16
- .mem.offset 8,0; st8.spill [r3]=r17,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r18,16
- .mem.offset 8,0; st8.spill [r3]=r19,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r20,16
- .mem.offset 8,0; st8.spill [r3]=r21,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r22,16
- .mem.offset 8,0; st8.spill [r3]=r23,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r24,16
- .mem.offset 8,0; st8.spill [r3]=r25,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r26,16
- .mem.offset 8,0; st8.spill [r3]=r27,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r28,16
- .mem.offset 8,0; st8.spill [r3]=r29,16
- ;;
- .mem.offset 0,0; st8.spill [r2]=r30,16
- .mem.offset 8,0; st8.spill [r3]=r31,16
- ;;
- mov r9=ar.unat
- adds r8=IA64_VPD_VB0NAT_OFFSET, r14
- ;;
- st8 [r8]=r9
- adds r8=IA64_VPD_VB1NAT_OFFSET, r14
- ;;
- ld8 r9=[r8]
- adds r2=IA64_VPD_VB1REG_OFFSET, r14
- adds r3=IA64_VPD_VB1REG_OFFSET+8, r14
- ;;
- mov ar.unat=r9
- ;;
- ld8.fill r16=[r2],16
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- ;;
- ld8.fill r22=[r2],16
- ld8.fill r23=[r3],16
- ;;
- ld8.fill r24=[r2],16
- ld8.fill r25=[r3],16
- ;;
- ld8.fill r26=[r2],16
- ld8.fill r27=[r3],16
- ;;
- ld8.fill r28=[r2],16
- ld8.fill r29=[r3],16
- ;;
- ld8.fill r30=[r2],16
- ld8.fill r31=[r3],16
- ;;
- mov ar.unat=r15
- ;;
- bsw.0
- ;;
- br.ret.sptk.many b0
-END(vmx_asm_bsw1)
-
-
-// rfi
-ENTRY(vmx_asm_rfi)
- adds r18=IA64_VPD_BASE_OFFSET,r21
- ;;
- ld8 r18=[r18]
- ;;
- adds r26=IA64_VPD_VIFS_OFFSET,r18
- ;;
- ld8 r26=[r26]
- ;;
- tbit.z p6,p0=r26,63
- (p6) br.cond.dptk.few vmx_asm_rfi_1
- ;;
- // if vifs.v == 1, discard the current register frame
- alloc r27=ar.pfs,0,0,0,0
- ;;
-vmx_asm_rfi_1:
- adds r26=IA64_VPD_VHPI_OFFSET,r18
- ;;
- ld8 r26=[r26]
- ;;
- cmp.ne p6,p0=r26,r0
- (p6) br.cond.dpnt.many vmx_virtualization_fault_back
- ;;
- VMX_VIRT_SAVE
- ;;
- mov out0=r21
- movl r14=ia64_leave_hypervisor_virt
- ;;
- mov rp=r14
- br.call.sptk.many b6=vmx_vcpu_rfi_fast
-END(vmx_asm_rfi)
-
-
-//mov r1=ar3 (only itc is virtualized)
-ENTRY(vmx_asm_mov_from_ar)
- add r18=VCPU_VTM_OFFSET_OFS,r21
- add r16=VCPU_VTM_LAST_ITC_OFS,r21
- extr.u r17=r25,6,7
- ;;
- ld8 r18=[r18]
- mov r19=ar.itc
- mov r24=b0
- ;;
- ld8 r16=[r16]
- add r19=r19,r18
- movl r20=asm_mov_to_reg
- ;;
- adds r30=vmx_resume_to_guest-asm_mov_to_reg,r20
- shladd r17=r17,4,r20
- cmp.gtu p6,p0=r16,r19
- ;;
- (p6) mov r19=r16
- mov b0=r17
- br.sptk.few b0
- ;;
-END(vmx_asm_mov_from_ar)
-
-
-// mov r1=rr[r3]
-ENTRY(vmx_asm_mov_from_rr)
- extr.u r16=r25,20,7
- extr.u r17=r25,6,7
- movl r20=asm_mov_from_reg
- ;;
- adds r30=vmx_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
- shladd r16=r16,4,r20
- mov r24=b0
- ;;
- add r27=VCPU_VRR0_OFS,r21
- mov b0=r16
- br.many b0
- ;;
-vmx_asm_mov_from_rr_back_1:
- adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
- adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
- shr.u r26=r19,61
- ;;
- shladd r17=r17,4,r22
- shladd r27=r26,3,r27
- ;;
- ld8 r19=[r27]
- mov b0=r17
- br.many b0
-END(vmx_asm_mov_from_rr)
-
-
-// mov rr[r3]=r2
-ENTRY(vmx_asm_mov_to_rr)
- extr.u r16=r25,20,7 // r3
- extr.u r17=r25,13,7 // r2
- movl r20=asm_mov_from_reg
- ;;
- adds r30=vmx_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
- shladd r16=r16,4,r20 // get r3
- ;;
- mov b0=r16
- br.many b0
- ;;
-vmx_asm_mov_to_rr_back_1:
- adds r30=vmx_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
- shr.u r16=r19,61 // get RR #
- ;;
- // if rr7, fall back to the slow path
- cmp.eq p6,p0=7,r16
- mov b0=r23 // restore b0
- (p6) br.cond.dpnt.many vmx_virtualization_fault_back
- ;;
- mov r16=r19
- shladd r17=r17,4,r20 // get r2
- ;;
- mov b0=r17
- br.many b0
-vmx_asm_mov_to_rr_back_2:
- mov r17=r19 // get value
- ;;
- // if the value is invalid, fall back to the slow path
- adds r26=IA64_VCPU_RID_BITS_OFFSET,r21
- mov r27=r0
- ;;
- ld1 r27=[r26]
- ;;
- shr r19=r19,r27
- ;;
- cmp.ne p6,p0=r19,r0
- mov b0=r23 // restore b0
- (p6) br.cond.dpnt.many vmx_virtualization_fault_back
- ;;
- VMX_VIRT_SAVE
- ;;
- mov out0=r21
- mov out1=r16
- mov out2=r17
- movl r14=ia64_leave_hypervisor_virt
- ;;
- mov rp=r14
- br.call.sptk.many b6=vmx_vcpu_set_rr_fast
-END(vmx_asm_mov_to_rr)
-
-
-//rsm 25
-ENTRY(vmx_asm_rsm)
- extr.u r26=r25,6,21 // Imm21
- extr.u r27=r25,31,2 // I2d
- ;;
- extr.u r28=r25,36,1 // I
- dep r26=r27,r26,21,2
- ;;
- // r16 now holds imm24
- dep r16=r28,r26,23,1
- ;;
- VMX_VIRT_SAVE
- ;;
- mov out0=r21
- mov out1=r16
- movl r14=ia64_leave_hypervisor_virt
- ;;
- mov rp=r14
- br.call.sptk.many b6=vmx_vcpu_rsm_fast
-END(vmx_asm_rsm)
-
-
-//ssm 24
-ENTRY(vmx_asm_ssm)
- adds r18=IA64_VPD_BASE_OFFSET,r21
- ;;
- ld8 r18=[r18]
- ;;
- adds r26=IA64_VPD_VHPI_OFFSET,r18
- ;;
- ld8 r26=[r26]
- ;;
- cmp.ne p6,p0=r26,r0
- (p6) br.cond.dpnt.many vmx_virtualization_fault_back
- ;;
- extr.u r26=r25,6,21
- extr.u r27=r25,31,2
- ;;
- extr.u r28=r25,36,1
- dep r26=r27,r26,21,2
- ;; // r16 now holds imm24
- dep r16=r28,r26,23,1
- ;;
- VMX_VIRT_SAVE
- ;;
- mov out0=r21
- mov out1=r16
- movl r14=ia64_leave_hypervisor_virt
- ;;
- mov rp=r14
- br.call.sptk.many b6=vmx_vcpu_ssm_fast
-END(vmx_asm_ssm)
-
-
-//mov psr.l=r2
-ENTRY(vmx_asm_mov_to_psr)
- extr.u r26=r25,13,7 //r2
- movl r27=asm_mov_from_reg
- ;;
- adds r30=vmx_asm_mov_to_psr_back-asm_mov_from_reg,r27
- shladd r26=r26,4,r27
- ;;
- mov b0=r26
- br.many b0
- ;;
-vmx_asm_mov_to_psr_back:
- adds r18=IA64_VPD_BASE_OFFSET,r21
- tbit.nz p6,p0 = r19, IA64_PSR_I_BIT
- ;;
- ld8 r18=[r18]
- ;;
- adds r26=IA64_VPD_VHPI_OFFSET,r18
- ;;
- ld8 r26=[r26]
- ;;
- // if the new psr enables interrupts and vhpi is pending, fall back
- cmp.ne.and p6,p0=r26,r0
- (p6) br.cond.dpnt.many vmx_virtualization_fault_back
- ;;
- mov r16=r19
- ;;
- VMX_VIRT_SAVE
- ;;
- mov out0=r21
- mov out1=r16
- movl r14=ia64_leave_hypervisor_virt
- ;;
- mov rp=r14
- br.call.sptk.many b6=vmx_vcpu_mov_to_psr_fast
-END(vmx_asm_mov_to_psr)
-
-
-// thash r1=r3
-// TODO: add support when pta.vf = 1
-ENTRY(vmx_asm_thash)
- extr.u r17=r25,20,7 // get r3 from opcode in r25
- extr.u r18=r25,6,7 // get r1 from opcode in r25
- movl r20=asm_mov_from_reg
- ;;
- adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
- shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17)
- adds r16=IA64_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs
- mov r24=b0 // save b0
- ;;
- ld8 r16=[r16] // get VPD addr
- mov b0=r17
- br.many b0 // r19 return value
- ;;
-vmx_asm_thash_back1:
- shr.u r23=r19,61 // get RR number
- adds r28=VCPU_VRR0_OFS,r21 // get vcpu->arch.arch_vmx.vrr[0]'s addr
- adds r16=IA64_VPD_VPTA_OFFSET,r16 // get virtual pta
- ;;
- shladd r27=r23,3,r28 // get vcpu->arch.arch_vmx.vrr[r23]'s addr
- ld8 r17=[r16] // get virtual PTA
- mov r26=1
- ;;
- extr.u r29=r17,2,6 // get pta.size
- ld8 r28=[r27] // get vcpu->arch.arch_vmx.vrr[r23]'s value
- ;;
- // Fall-back to C if VF (long format) is set
- tbit.nz p6,p0=r17,8
- mov b0=r24
- ;;
- (p6) mov r24=EVENT_THASH
- (p6) br.cond.dpnt.many vmx_virtualization_fault_back
- extr.u r28=r28,2,6 // get rr.ps
- shl r22=r26,r29 // 1UL << pta.size
- ;;
- shr.u r23=r19,r28 // vaddr >> rr.ps
- adds r26=3,r29 // pta.size + 3
- shl r27=r17,3 // pta << 3
- ;;
- shl r23=r23,3 // (vaddr >> rr.ps) << 3
- shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3)
- movl r16=VRN_MASK
- ;;
- adds r22=-1,r22 // (1UL << pta.size) - 1
- shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size
- and r19=r19,r16 // vaddr & VRN_MASK
- ;;
- and r22=r22,r23 // vhpt_offset
- or r19=r19,r27 // (vadr&VRN_MASK) |(((pta<<3)>>(pta.size + 3))<<pta.size)
- adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
- ;;
- or r19=r19,r22 // calc pval
- shladd r17=r18,4,r26
- adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
- ;;
- mov b0=r17
- br.many b0
-END(vmx_asm_thash)
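
For reference, the short-format VHPT hash computed by vmx_asm_thash reads more clearly in C. A hedged sketch, assuming VRN_MASK covers the region-number bits 63-61 as in the ia64 headers (the function name is illustrative):

    /* Sketch only: mirrors the register dance above for pta.vf == 0. */
    #define VRN_MASK 0xe000000000000000UL   /* region number, bits 63-61 */

    static unsigned long thash_short(unsigned long vaddr, unsigned long pta,
                                     unsigned long rr)
    {
        unsigned long pta_size = (pta >> 2) & 0x3f;  /* extr.u r29=r17,2,6 */
        unsigned long rr_ps    = (rr >> 2) & 0x3f;   /* extr.u r28=r28,2,6 */
        unsigned long vhpt_off = ((vaddr >> rr_ps) << 3)
                                 & ((1UL << pta_size) - 1);
        unsigned long base     = ((pta << 3) >> (pta_size + 3)) << pta_size;

        return (vaddr & VRN_MASK) | base | vhpt_off;
    }
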
-
-
-
-#define MOV_TO_REG0 \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- nop.b 0x0; \
- ;; \
-};
-
-
-#define MOV_TO_REG(n) \
-{; \
- mov r##n##=r19; \
- mov b0=r30; \
- br.sptk.many b0; \
- ;; \
-};
-
-
-#define MOV_FROM_REG(n) \
-{; \
- mov r19=r##n##; \
- mov b0=r30; \
- br.sptk.many b0; \
- ;; \
-};
-
-
-#define MOV_TO_BANK0_REG(n) \
-ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
-{; \
- mov r26=r2; \
- mov r2=r19; \
- bsw.1; \
- ;; \
-}; \
-{; \
- mov r##n##=r2; \
- nop.b 0x0; \
- bsw.0; \
- ;; \
-}; \
-{; \
- mov r2=r26; \
- mov b0=r30; \
- br.sptk.many b0; \
- ;; \
-}; \
-END(asm_mov_to_bank0_reg##n##)
-
-
-#define MOV_FROM_BANK0_REG(n) \
-ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
-{; \
- mov r26=r2; \
- nop.b 0x0; \
- bsw.1; \
- ;; \
-}; \
-{; \
- mov r2=r##n##; \
- nop.b 0x0; \
- bsw.0; \
- ;; \
-}; \
-{; \
- mov r19=r2; \
- mov r2=r26; \
- mov b0=r30; \
-}; \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- br.sptk.many b0; \
- ;; \
-}; \
-END(asm_mov_from_bank0_reg##n##)
-
-
-#define JMP_TO_MOV_TO_BANK0_REG(n) \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- br.sptk.many asm_mov_to_bank0_reg##n##; \
- ;; \
-}
-
-
-#define JMP_TO_MOV_FROM_BANK0_REG(n) \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- br.sptk.many asm_mov_from_bank0_reg##n##; \
- ;; \
-}
-
-
-MOV_FROM_BANK0_REG(16)
-MOV_FROM_BANK0_REG(17)
-MOV_FROM_BANK0_REG(18)
-MOV_FROM_BANK0_REG(19)
-MOV_FROM_BANK0_REG(20)
-MOV_FROM_BANK0_REG(21)
-MOV_FROM_BANK0_REG(22)
-MOV_FROM_BANK0_REG(23)
-MOV_FROM_BANK0_REG(24)
-MOV_FROM_BANK0_REG(25)
-MOV_FROM_BANK0_REG(26)
-MOV_FROM_BANK0_REG(27)
-MOV_FROM_BANK0_REG(28)
-MOV_FROM_BANK0_REG(29)
-MOV_FROM_BANK0_REG(30)
-MOV_FROM_BANK0_REG(31)
-
-
-// mov from reg table
-// r19:value, r30: return address
-// r26 may be destroyed
-ENTRY(asm_mov_from_reg)
- MOV_FROM_REG(0)
- MOV_FROM_REG(1)
- MOV_FROM_REG(2)
- MOV_FROM_REG(3)
- MOV_FROM_REG(4)
- MOV_FROM_REG(5)
- MOV_FROM_REG(6)
- MOV_FROM_REG(7)
- MOV_FROM_REG(8)
- MOV_FROM_REG(9)
- MOV_FROM_REG(10)
- MOV_FROM_REG(11)
- MOV_FROM_REG(12)
- MOV_FROM_REG(13)
- MOV_FROM_REG(14)
- MOV_FROM_REG(15)
- JMP_TO_MOV_FROM_BANK0_REG(16)
- JMP_TO_MOV_FROM_BANK0_REG(17)
- JMP_TO_MOV_FROM_BANK0_REG(18)
- JMP_TO_MOV_FROM_BANK0_REG(19)
- JMP_TO_MOV_FROM_BANK0_REG(20)
- JMP_TO_MOV_FROM_BANK0_REG(21)
- JMP_TO_MOV_FROM_BANK0_REG(22)
- JMP_TO_MOV_FROM_BANK0_REG(23)
- JMP_TO_MOV_FROM_BANK0_REG(24)
- JMP_TO_MOV_FROM_BANK0_REG(25)
- JMP_TO_MOV_FROM_BANK0_REG(26)
- JMP_TO_MOV_FROM_BANK0_REG(27)
- JMP_TO_MOV_FROM_BANK0_REG(28)
- JMP_TO_MOV_FROM_BANK0_REG(29)
- JMP_TO_MOV_FROM_BANK0_REG(30)
- JMP_TO_MOV_FROM_BANK0_REG(31)
- MOV_FROM_REG(32)
- MOV_FROM_REG(33)
- MOV_FROM_REG(34)
- MOV_FROM_REG(35)
- MOV_FROM_REG(36)
- MOV_FROM_REG(37)
- MOV_FROM_REG(38)
- MOV_FROM_REG(39)
- MOV_FROM_REG(40)
- MOV_FROM_REG(41)
- MOV_FROM_REG(42)
- MOV_FROM_REG(43)
- MOV_FROM_REG(44)
- MOV_FROM_REG(45)
- MOV_FROM_REG(46)
- MOV_FROM_REG(47)
- MOV_FROM_REG(48)
- MOV_FROM_REG(49)
- MOV_FROM_REG(50)
- MOV_FROM_REG(51)
- MOV_FROM_REG(52)
- MOV_FROM_REG(53)
- MOV_FROM_REG(54)
- MOV_FROM_REG(55)
- MOV_FROM_REG(56)
- MOV_FROM_REG(57)
- MOV_FROM_REG(58)
- MOV_FROM_REG(59)
- MOV_FROM_REG(60)
- MOV_FROM_REG(61)
- MOV_FROM_REG(62)
- MOV_FROM_REG(63)
- MOV_FROM_REG(64)
- MOV_FROM_REG(65)
- MOV_FROM_REG(66)
- MOV_FROM_REG(67)
- MOV_FROM_REG(68)
- MOV_FROM_REG(69)
- MOV_FROM_REG(70)
- MOV_FROM_REG(71)
- MOV_FROM_REG(72)
- MOV_FROM_REG(73)
- MOV_FROM_REG(74)
- MOV_FROM_REG(75)
- MOV_FROM_REG(76)
- MOV_FROM_REG(77)
- MOV_FROM_REG(78)
- MOV_FROM_REG(79)
- MOV_FROM_REG(80)
- MOV_FROM_REG(81)
- MOV_FROM_REG(82)
- MOV_FROM_REG(83)
- MOV_FROM_REG(84)
- MOV_FROM_REG(85)
- MOV_FROM_REG(86)
- MOV_FROM_REG(87)
- MOV_FROM_REG(88)
- MOV_FROM_REG(89)
- MOV_FROM_REG(90)
- MOV_FROM_REG(91)
- MOV_FROM_REG(92)
- MOV_FROM_REG(93)
- MOV_FROM_REG(94)
- MOV_FROM_REG(95)
- MOV_FROM_REG(96)
- MOV_FROM_REG(97)
- MOV_FROM_REG(98)
- MOV_FROM_REG(99)
- MOV_FROM_REG(100)
- MOV_FROM_REG(101)
- MOV_FROM_REG(102)
- MOV_FROM_REG(103)
- MOV_FROM_REG(104)
- MOV_FROM_REG(105)
- MOV_FROM_REG(106)
- MOV_FROM_REG(107)
- MOV_FROM_REG(108)
- MOV_FROM_REG(109)
- MOV_FROM_REG(110)
- MOV_FROM_REG(111)
- MOV_FROM_REG(112)
- MOV_FROM_REG(113)
- MOV_FROM_REG(114)
- MOV_FROM_REG(115)
- MOV_FROM_REG(116)
- MOV_FROM_REG(117)
- MOV_FROM_REG(118)
- MOV_FROM_REG(119)
- MOV_FROM_REG(120)
- MOV_FROM_REG(121)
- MOV_FROM_REG(122)
- MOV_FROM_REG(123)
- MOV_FROM_REG(124)
- MOV_FROM_REG(125)
- MOV_FROM_REG(126)
- MOV_FROM_REG(127)
-END(asm_mov_from_reg)
-
-
-/* must be in bank 0
- * parameter:
- * r31: pr
- * r24: b0
- * p2: whether increase IP
- * p3: whether check vpsr.ic
- */
-ENTRY(vmx_resume_to_guest)
- // ip ++
- (p2) mov r16=cr.ipsr
- (p2) dep.z r30=1,IA64_PSR_RI_BIT,1
- adds r19=IA64_VPD_BASE_OFFSET,r21
- ;;
- ld8 r25=[r19]
- (p2) add r16=r30,r16
- ;;
- (p2) mov cr.ipsr=r16
- adds r19= VPD_VPSR_START_OFFSET,r25
- ;;
- ld8 r19=[r19]
- ;;
- mov r23=r31
- mov r17=r0
- //vps_resume_normal/handler
- tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p7=vpsr.ic
- (p6) br.cond.sptk.many vmx_vps_resume_handler
- (p7) br.cond.sptk.few vmx_vps_resume_normal
-END(vmx_resume_to_guest)
-
-
-MOV_TO_BANK0_REG(16)
-MOV_TO_BANK0_REG(17)
-MOV_TO_BANK0_REG(18)
-MOV_TO_BANK0_REG(19)
-MOV_TO_BANK0_REG(20)
-MOV_TO_BANK0_REG(21)
-MOV_TO_BANK0_REG(22)
-MOV_TO_BANK0_REG(23)
-MOV_TO_BANK0_REG(24)
-MOV_TO_BANK0_REG(25)
-MOV_TO_BANK0_REG(26)
-MOV_TO_BANK0_REG(27)
-MOV_TO_BANK0_REG(28)
-MOV_TO_BANK0_REG(29)
-MOV_TO_BANK0_REG(30)
-MOV_TO_BANK0_REG(31)
-
-
-// mov to reg table
-// r19:value, r30: return address
-ENTRY(asm_mov_to_reg)
- MOV_TO_REG0
- MOV_TO_REG(1)
- MOV_TO_REG(2)
- MOV_TO_REG(3)
- MOV_TO_REG(4)
- MOV_TO_REG(5)
- MOV_TO_REG(6)
- MOV_TO_REG(7)
- MOV_TO_REG(8)
- MOV_TO_REG(9)
- MOV_TO_REG(10)
- MOV_TO_REG(11)
- MOV_TO_REG(12)
- MOV_TO_REG(13)
- MOV_TO_REG(14)
- MOV_TO_REG(15)
- JMP_TO_MOV_TO_BANK0_REG(16)
- JMP_TO_MOV_TO_BANK0_REG(17)
- JMP_TO_MOV_TO_BANK0_REG(18)
- JMP_TO_MOV_TO_BANK0_REG(19)
- JMP_TO_MOV_TO_BANK0_REG(20)
- JMP_TO_MOV_TO_BANK0_REG(21)
- JMP_TO_MOV_TO_BANK0_REG(22)
- JMP_TO_MOV_TO_BANK0_REG(23)
- JMP_TO_MOV_TO_BANK0_REG(24)
- JMP_TO_MOV_TO_BANK0_REG(25)
- JMP_TO_MOV_TO_BANK0_REG(26)
- JMP_TO_MOV_TO_BANK0_REG(27)
- JMP_TO_MOV_TO_BANK0_REG(28)
- JMP_TO_MOV_TO_BANK0_REG(29)
- JMP_TO_MOV_TO_BANK0_REG(30)
- JMP_TO_MOV_TO_BANK0_REG(31)
- MOV_TO_REG(32)
- MOV_TO_REG(33)
- MOV_TO_REG(34)
- MOV_TO_REG(35)
- MOV_TO_REG(36)
- MOV_TO_REG(37)
- MOV_TO_REG(38)
- MOV_TO_REG(39)
- MOV_TO_REG(40)
- MOV_TO_REG(41)
- MOV_TO_REG(42)
- MOV_TO_REG(43)
- MOV_TO_REG(44)
- MOV_TO_REG(45)
- MOV_TO_REG(46)
- MOV_TO_REG(47)
- MOV_TO_REG(48)
- MOV_TO_REG(49)
- MOV_TO_REG(50)
- MOV_TO_REG(51)
- MOV_TO_REG(52)
- MOV_TO_REG(53)
- MOV_TO_REG(54)
- MOV_TO_REG(55)
- MOV_TO_REG(56)
- MOV_TO_REG(57)
- MOV_TO_REG(58)
- MOV_TO_REG(59)
- MOV_TO_REG(60)
- MOV_TO_REG(61)
- MOV_TO_REG(62)
- MOV_TO_REG(63)
- MOV_TO_REG(64)
- MOV_TO_REG(65)
- MOV_TO_REG(66)
- MOV_TO_REG(67)
- MOV_TO_REG(68)
- MOV_TO_REG(69)
- MOV_TO_REG(70)
- MOV_TO_REG(71)
- MOV_TO_REG(72)
- MOV_TO_REG(73)
- MOV_TO_REG(74)
- MOV_TO_REG(75)
- MOV_TO_REG(76)
- MOV_TO_REG(77)
- MOV_TO_REG(78)
- MOV_TO_REG(79)
- MOV_TO_REG(80)
- MOV_TO_REG(81)
- MOV_TO_REG(82)
- MOV_TO_REG(83)
- MOV_TO_REG(84)
- MOV_TO_REG(85)
- MOV_TO_REG(86)
- MOV_TO_REG(87)
- MOV_TO_REG(88)
- MOV_TO_REG(89)
- MOV_TO_REG(90)
- MOV_TO_REG(91)
- MOV_TO_REG(92)
- MOV_TO_REG(93)
- MOV_TO_REG(94)
- MOV_TO_REG(95)
- MOV_TO_REG(96)
- MOV_TO_REG(97)
- MOV_TO_REG(98)
- MOV_TO_REG(99)
- MOV_TO_REG(100)
- MOV_TO_REG(101)
- MOV_TO_REG(102)
- MOV_TO_REG(103)
- MOV_TO_REG(104)
- MOV_TO_REG(105)
- MOV_TO_REG(106)
- MOV_TO_REG(107)
- MOV_TO_REG(108)
- MOV_TO_REG(109)
- MOV_TO_REG(110)
- MOV_TO_REG(111)
- MOV_TO_REG(112)
- MOV_TO_REG(113)
- MOV_TO_REG(114)
- MOV_TO_REG(115)
- MOV_TO_REG(116)
- MOV_TO_REG(117)
- MOV_TO_REG(118)
- MOV_TO_REG(119)
- MOV_TO_REG(120)
- MOV_TO_REG(121)
- MOV_TO_REG(122)
- MOV_TO_REG(123)
- MOV_TO_REG(124)
- MOV_TO_REG(125)
- MOV_TO_REG(126)
- MOV_TO_REG(127)
-END(asm_mov_to_reg)
diff --git a/xen/arch/ia64/vmx/pal_emul.c b/xen/arch/ia64/vmx/pal_emul.c
deleted file mode 100644
index d7ddf47a35..0000000000
--- a/xen/arch/ia64/vmx/pal_emul.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * PAL/SAL call delegation
- *
- * Copyright (c) 2004 Li Susie <susie.li@intel.com>
- * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <xen/lib.h>
-#include <asm/vcpu.h>
-#include <asm/dom_fw.h>
-#include <asm/pal.h>
-#include <asm/sal.h>
-
-void
-pal_emul(struct vcpu *vcpu)
-{
- u64 gr28, gr29, gr30, gr31;
- struct ia64_pal_retval result;
-
- vcpu_get_gr_nat(vcpu, 28, &gr28); //bank1
-
- /* FIXME: works only for the static calling convention? */
- vcpu_get_gr_nat(vcpu, 29, &gr29);
- vcpu_get_gr_nat(vcpu, 30, &gr30);
- vcpu_get_gr_nat(vcpu, 31, &gr31);
-
- perfc_incr(vmx_pal_emul);
- result = xen_pal_emulator(gr28, gr29, gr30, gr31);
-
- vcpu_set_gr(vcpu, 8, result.status, 0);
- vcpu_set_gr(vcpu, 9, result.v0, 0);
- vcpu_set_gr(vcpu, 10, result.v1, 0);
- vcpu_set_gr(vcpu, 11, result.v2, 0);
-}
-
-void
-sal_emul(struct vcpu *v)
-{
- struct sal_ret_values result;
- result = sal_emulator(vcpu_get_gr(v, 32), vcpu_get_gr(v, 33),
- vcpu_get_gr(v, 34), vcpu_get_gr(v, 35),
- vcpu_get_gr(v, 36), vcpu_get_gr(v, 37),
- vcpu_get_gr(v, 38), vcpu_get_gr(v, 39));
-
- vcpu_set_gr(v, 8, result.r8, 0);
- vcpu_set_gr(v, 9, result.r9, 0);
- vcpu_set_gr(v, 10, result.r10, 0);
- vcpu_set_gr(v, 11, result.r11, 0);
-}
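
The static PAL calling convention emulated above is easy to lose among the bare register numbers. A hedged summary sketch (the struct is illustrative only; the real code reads and writes the guest registers directly):

    /* Sketch only: register usage of a static-convention PAL call. */
    struct pal_call {
        u64 index;              /* GR28: PAL procedure index */
        u64 arg1, arg2, arg3;   /* GR29..GR31: arguments     */
        u64 status;             /* r8:  return status        */
        u64 v0, v1, v2;         /* r9..r11: return values    */
    };
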
diff --git a/xen/arch/ia64/vmx/save.c b/xen/arch/ia64/vmx/save.c
deleted file mode 100644
index f0e9145f91..0000000000
--- a/xen/arch/ia64/vmx/save.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * hvm/save.c: Save and restore HVM guest's emulated hardware state.
- *
- * Copyright (c) 2007, Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * IA64 support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <xen/types.h>
-#include <xen/hvm/save.h>
-
-void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr)
-{
- unsigned int i;
-
- for (i = 0; i < 5; ++i)
- hdr->cpuid[i] = ia64_get_cpuid(i);
-}
-
-int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
-{
- unsigned int i;
- if (hdr->magic != HVM_FILE_MAGIC) {
- gdprintk(XENLOG_ERR,
- "HVM restore: bad magic number %#"PRIx64"\n", hdr->magic);
- return -1;
- }
-
- if (hdr->version != HVM_FILE_VERSION) {
- gdprintk(XENLOG_ERR,
- "HVM restore: unsupported version %"PRIx64"\n", hdr->version);
- return -1;
- }
-
- for (i = 0; i < 5; ++i) {
- unsigned long cpuid = ia64_get_cpuid(i);
- /* TODO: need to define how big a difference is acceptable */
- if (hdr->cpuid[i] != cpuid)
- gdprintk(XENLOG_WARNING,
- "HVM restore: saved CPUID[%d] (%#lx) "
- "does not match host (%#lx).\n", i, hdr->cpuid[i], cpuid);
- }
-
- return 0;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/ia64/vmx/sioemu.c b/xen/arch/ia64/vmx/sioemu.c
deleted file mode 100644
index 8c3c8b373c..0000000000
--- a/xen/arch/ia64/vmx/sioemu.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * sioemu.c: Self IO emulation - hypercall and return.
- * Copyright (c) 2008, Tristan Gingold <tgingold@free.fr>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <asm/vcpu.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/sioemu.h>
-#include <public/arch-ia64/sioemu.h>
-#include <asm/dom_fw.h>
-#include <asm/debugger.h>
-#include <asm/sal.h>
-#include <asm/vlsapic.h>
-
-struct sioemu_callback_info *
-sioemu_deliver (void)
-{
- VCPU *vcpu = current;
- REGS *regs = vcpu_regs(vcpu);
- struct sioemu_callback_info *info = vcpu->arch.arch_vmx.sioemu_info_mva;
- unsigned long psr = vmx_vcpu_get_psr(vcpu);
-
- if (vcpu->vcpu_info->evtchn_upcall_mask)
- panic_domain (NULL, "sioemu_deliver: aleady in stub mode\n");
- if (info == NULL)
- panic_domain (NULL, "sioemu_deliver: set_callback not called\n");
-
- /* All cleared, but keep BN. */
- vmx_vcpu_set_psr(vcpu, IA64_PSR_MC | (psr & IA64_PSR_BN));
-
- /* Set info. */
- info->ip = regs->cr_iip;
- info->psr = psr;
- info->ifs = regs->cr_ifs;
- info->nats = (((regs->eml_unat >> IA64_PT_REGS_R8_SLOT) & 0x0f) << 8)
- | (((regs->eml_unat >> IA64_PT_REGS_R2_SLOT) & 1) << 2);
- info->r8 = regs->r8;
- info->r9 = regs->r9;
- info->r10 = regs->r10;
- info->r11 = regs->r11;
- info->r2 = regs->r2;
-
- regs->cr_ifs = 0; // pre-cover
- regs->cr_iip = vcpu->arch.event_callback_ip;
- regs->eml_unat &= ~(1UL << IA64_PT_REGS_R8_SLOT);
- regs->r8 = vcpu->arch.arch_vmx.sioemu_info_gpa;
-
- /* Mask events. */
- vcpu->vcpu_info->evtchn_upcall_mask = 1;
-
- debugger_event(XEN_IA64_DEBUG_ON_EVENT);
-
- return info;
-}
-
-static void
-sioemu_callback_return (void)
-{
- VCPU *vcpu = current;
- REGS *regs = vcpu_regs(vcpu);
- struct sioemu_callback_info *info = vcpu->arch.arch_vmx.sioemu_info_mva;
-
- if (info == NULL)
- panic_domain (NULL, "sioemu_deliver: set_callback not called\n");
- if ((info->cause & ~0x1UL) != 0)
- panic_domain (NULL, "sioemu_callback_return: bad operation (%lx)\n",
- info->cause);
-
- /* First restore registers. */
- regs->cr_iip = info->ip;
- regs->cr_ifs = info->ifs;
- vmx_vcpu_set_psr (vcpu, info->psr);
- regs->r8 = info->r8;
- regs->r9 = info->r9;
- regs->r10 = info->r10;
- regs->r11 = info->r11;
- regs->r2 = info->r2;
- regs->eml_unat &= ~((0x0fUL << IA64_PT_REGS_R8_SLOT)
- | (1UL << IA64_PT_REGS_R2_SLOT));
- regs->eml_unat |= (((info->nats >> 8) & 0x0f) << IA64_PT_REGS_R8_SLOT)
- | (((info->nats >> 2) & 1) << IA64_PT_REGS_R2_SLOT);
-
- /* Unmask events. */
- vcpu->vcpu_info->evtchn_upcall_mask = 0;
-
- /* Then apply commands. */
- if (info->cause & 1) {
- emulate_io_update (vcpu, info->arg0, info->arg1, info->arg2);
- }
-}
-
-void
-sioemu_deliver_event (void)
-{
- struct sioemu_callback_info *info;
-
- info = sioemu_deliver ();
- info->cause = SIOEMU_CB_EVENT;
-}
-
-void
-sioemu_io_emulate (unsigned long padr, unsigned long data,
- unsigned long data1, unsigned long word)
-{
- struct sioemu_callback_info *info;
-
- info = sioemu_deliver ();
- info->cause = SIOEMU_CB_IO_EMULATE;
- info->arg0 = padr;
- info->arg1 = data;
- info->arg2 = data1;
- info->arg3 = word;
-}
-
-void
-sioemu_sal_assist (struct vcpu *v)
-{
- struct sioemu_callback_info *info;
-
- info = sioemu_deliver ();
- info->cause = SIOEMU_CB_SAL_ASSIST;
-}
-
-static int
-sioemu_set_callback (struct vcpu *v, unsigned long cb_ip, unsigned long paddr)
-{
- struct page_info *page;
- unsigned long mfn;
- pte_t pte;
-
- v->arch.event_callback_ip = cb_ip;
- if ((paddr & 0xfff) || v->arch.arch_vmx.sioemu_info_mva)
- return -EINVAL;
- pte = *lookup_noalloc_domain_pte(v->domain, paddr);
- if (!pte_present(pte) || !pte_mem(pte))
- return -EINVAL;
- mfn = pte_pfn(pte);
- ASSERT(mfn_valid(mfn));
-
- page = mfn_to_page(mfn);
- if (get_page(page, v->domain) == 0)
- return -EINVAL;
- v->arch.arch_vmx.sioemu_info_gpa = paddr;
- v->arch.arch_vmx.sioemu_info_mva = mfn_to_virt(mfn);
- return 0;
-}
-
-static int
-sioemu_add_io_physmap (struct domain *d, unsigned long start,
- unsigned long size, unsigned long type)
-{
- unsigned long i;
- int res;
-
- /* Convert to ppn. */
- type <<= PAGE_SHIFT;
-
- /* Check type. */
- if (type == 0 || (type & _PAGE_PPN_MASK) != type)
- return -EINVAL;
- if ((start & (PAGE_SIZE -1)) || (size & (PAGE_SIZE - 1)))
- return -EINVAL;
-
- /* Check area is currently unassigned. */
- for (i = start; i < start + size; i += PAGE_SIZE) {
- if (____lookup_domain_mpa(d, i) != INVALID_MFN)
- return -EBUSY;
- }
-
- /* Set. */
- for (i = start; i < start + size; i += PAGE_SIZE) {
- res = __assign_domain_page(d, i, type, ASSIGN_writable | ASSIGN_io);
- if (res != 0)
- return res;
- }
-
- return 0;
-}
-
-void
-sioemu_hypercall (struct pt_regs *regs)
-{
- //printk ("sioemu_hypercall: r2=%lx r8=%lx r9=%lx\n",
- // regs->r2, regs->r8, regs->r9);
-
- if (current->vcpu_info->evtchn_upcall_mask == 0)
- panic_domain(NULL, "sioemu_hypercall: not in stub mode\n");
-
- switch (regs->r2 & FW_HYPERCALL_NUM_MASK_LOW)
- {
- case SIOEMU_HYPERCALL_SET_CALLBACK:
- regs->r8 = sioemu_set_callback(current, regs->r8, regs->r9);
- break;
- case SIOEMU_HYPERCALL_START_FW:
- regs->cr_iip = regs->r8;
- vmx_vcpu_set_psr(current, regs->r9);
- current->vcpu_info->evtchn_upcall_mask = 0;
- break;
- case SIOEMU_HYPERCALL_ADD_IO_PHYSMAP:
- regs->r8 = sioemu_add_io_physmap(current->domain,
- regs->r8, regs->r9, regs->r10);
- break;
- case SIOEMU_HYPERCALL_GET_TIME:
- {
- uint64_t sec, nsec, now;
- get_wallclock(&sec, &nsec, &now);
- regs->r8 = (sec << 30) + nsec;
- regs->r9 = now;
- break;
- }
- case SIOEMU_HYPERCALL_FLUSH_CACHE:
- regs->r8 = ia64_sal_cache_flush(regs->r8);
- break;
- case SIOEMU_HYPERCALL_FREQ_BASE:
- regs->r8 = ia64_sal_freq_base(regs->r8, &regs->r9, &regs->r10);
- break;
- case SIOEMU_HYPERCALL_DELIVER_INT:
- regs->r8 = vlsapic_deliver_int(current->domain,
- regs->r8, regs->r9, regs->r10);
- break;
- case SIOEMU_HYPERCALL_CALLBACK_RETURN:
- sioemu_callback_return ();
- vcpu_decrement_iip(current);
- break;
- default:
- panic_domain (NULL, "bad sioemu hypercall %lx\n", regs->r2);
- break;
- }
-}
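
The NaT-bit packing used by sioemu_deliver() and undone by sioemu_callback_return() stores the NaT bit of rN at bit N of info->nats, so bits 8-11 cover r8-r11 and bit 2 covers r2. A hedged sketch of the forward direction (the helper name is illustrative):

    /* Sketch only: mirrors the eml_unat shifts in sioemu_deliver(). */
    static unsigned long pack_nats(unsigned long eml_unat)
    {
        return (((eml_unat >> IA64_PT_REGS_R8_SLOT) & 0x0f) << 8)
             | (((eml_unat >> IA64_PT_REGS_R2_SLOT) & 0x01) << 2);
    }
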
diff --git a/xen/arch/ia64/vmx/vacpi.c b/xen/arch/ia64/vmx/vacpi.c
deleted file mode 100644
index 1720aebcc7..0000000000
--- a/xen/arch/ia64/vmx/vacpi.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * vacpi.c: ACPI emulation
- * based on x86 hvm/pmtimer.c
- *
- * Copyright (c) 2007, FUJITSU LIMITED
- * Kouya Shimura <kouya at jp fujitsu com>
- * Copyright (c) 2007 VA Linux Systems Japan K.K
- * Isaku Yamahata <yamahata at valinux co jp>
- * SMP support
- * save/restore support
- *
- * Copyright (c) 2007, XenSource inc.
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <asm/vmx_vcpu.h>
-#include <asm/vmx.h>
-#include <asm/hvm/vacpi.h>
-#include <asm/hvm/support.h>
-#include <public/hvm/save.h>
-
-/* The interesting bits of the PM1a_STS register */
-#define TMR_STS (1 << 0)
-#define PWRBTN_STS (1 << 5)
-#define GBL_STS (1 << 8)
-
-/* The same in PM1a_EN */
-#define TMR_EN (1 << 0)
-#define PWRBTN_EN (1 << 5)
-#define GBL_EN (1 << 8)
-
-/* Mask of bits in PM1a_STS that can generate an SCI. Although the ACPI
- * spec lists other bits, the PIIX4, which we are emulating, only
- * supports these three. For now, we only use TMR_STS; in future we
- * will let qemu set the other bits */
-#define SCI_MASK (TMR_STS|PWRBTN_STS|GBL_STS)
-
-/* SCI IRQ number (must match SCI_INT number in ACPI FADT in hvmloader) */
-#define SCI_IRQ 9
-
-/* We provide a 32-bit counter (must match the TMR_VAL_EXT bit in the FADT) */
-#define TMR_VAL_MASK (0xffffffff)
-#define TMR_VAL_MSB (0x80000000)
-
-/*
- * Locking order: vacpi->lock => viosapic->lock
- * pmt_update_sci() => viosapic_set_irq() => viosapic->lock
- */
-/* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
-static void pmt_update_sci(struct domain *d, struct vacpi *s)
-{
- ASSERT(spin_is_locked(&s->lock));
- if (s->regs.pm1a_en & s->regs.pm1a_sts & SCI_MASK)
- viosapic_set_irq(d, SCI_IRQ, 1); /* Assert */
- else
- viosapic_set_irq(d, SCI_IRQ, 0);
-}
-
-/* Set the correct value in the timer, accounting for time elapsed
- * since the last time we did that. */
-static void pmt_update_time(struct domain *d)
-{
- struct vacpi *s = &d->arch.hvm_domain.vacpi;
- s_time_t curr_gtime;
- unsigned long delta;
- uint32_t msb = s->regs.tmr_val & TMR_VAL_MSB;
-
- ASSERT(spin_is_locked(&s->lock));
-
- /* Update the timer */
- curr_gtime = NOW();
- delta = curr_gtime - s->last_gtime;
- delta = ((delta >> 8) * ((FREQUENCE_PMTIMER << 32) / SECONDS(1))) >> 24;
- s->regs.tmr_val += delta;
- s->regs.tmr_val &= TMR_VAL_MASK;
- s->last_gtime = curr_gtime;
-
- /* If the counter's MSB has changed, set the status bit */
- if ((s->regs.tmr_val & TMR_VAL_MSB) != msb) {
- s->regs.pm1a_sts |= TMR_STS;
- pmt_update_sci(d, s);
- }
-}
-
-/* This function should be called soon after each time the MSB of the
- * pmtimer register rolls over, to make sure we update the status
- * registers and SCI at least once per rollover */
-static void pmt_timer_callback(void *opaque)
-{
- struct domain *d = opaque;
- struct vacpi *s = &d->arch.hvm_domain.vacpi;
- uint64_t cycles, time_flip;
-
- spin_lock(&s->lock);
-
- /* Recalculate the timer and make sure we get an SCI if we need one */
- pmt_update_time(d);
-
- /* How close are we to the next MSB flip? */
- cycles = TMR_VAL_MSB - (s->regs.tmr_val & (TMR_VAL_MSB - 1));
-
- /* Convert that tick count into the time until the next flip */
- time_flip = (((SECONDS(1) << 23) / FREQUENCE_PMTIMER) * cycles) >> 23;
-
- /* Wake up again near the next bit-flip */
- set_timer(&s->timer, NOW() + time_flip + MILLISECS(1));
-
- spin_unlock(&s->lock);
-}
-
-int vacpi_intercept(ioreq_t * iop, u64 * val)
-{
- struct domain *d = current->domain;
- struct vacpi *s = &d->arch.hvm_domain.vacpi;
- uint64_t addr_off = iop->addr - ACPI_PM1A_EVT_BLK_ADDRESS;
-
- if (addr_off < 4) { /* Access to PM1a_STS and PM1a_EN registers */
- void *p = (void *)&s->regs.evt_blk + addr_off;
-
- spin_lock(&s->lock);
- if (iop->dir == 1) { /* Read */
- if (iop->size == 1)
- *val = *(uint8_t *) p;
- else if (iop->size == 2)
- *val = *(uint16_t *) p;
- else if (iop->size == 4)
- *val = *(uint32_t *) p;
- else
- panic_domain(NULL, "wrong ACPI "
- "PM1A_EVT_BLK access\n");
- } else { /* Write */
- uint8_t *sp = (uint8_t *) & iop->data;
- int i;
-
- for (i = 0; i < iop->size; i++, addr_off++, p++, sp++) {
- if (addr_off < 2) /* PM1a_STS */
- /* write-to-clear */
- *(uint8_t *) p &= ~*sp;
- else /* PM1a_EN */
- *(uint8_t *) p = *sp;
- }
- /* Fix the SCI state to match the new register state */
- pmt_update_sci(d, s);
- }
- spin_unlock(&s->lock);
-
- iop->state = STATE_IORESP_READY;
- vmx_io_assist(current);
- return 1;
- }
-
- if (iop->addr == ACPI_PM_TMR_BLK_ADDRESS) {
- if (iop->size != 4)
- panic_domain(NULL, "wrong ACPI PM timer access\n");
- if (iop->dir == 1) { /* Read */
- spin_lock(&s->lock);
- pmt_update_time(d);
- *val = s->regs.tmr_val;
- spin_unlock(&s->lock);
- }
- /* PM_TMR_BLK is read-only */
- iop->state = STATE_IORESP_READY;
- vmx_io_assist(current);
- return 1;
- }
-
- return 0;
-}
-
-void vacpi_init(struct domain *d)
-{
- struct vacpi *s = &d->arch.hvm_domain.vacpi;
-
- spin_lock_init(&s->lock);
-
- s->regs.tmr_val = 0;
- s->regs.evt_blk = 0;
- s->last_gtime = NOW();
-
- /* Set up callback to fire SCIs when the MSB of TMR_VAL changes */
- init_timer(&s->timer, pmt_timer_callback, d, cpumask_first(&cpu_online_map));
- pmt_timer_callback(d);
-}
-
-void vacpi_relinquish_resources(struct domain *d)
-{
- struct vacpi *s = &d->arch.hvm_domain.vacpi;
- kill_timer(&s->timer);
-}
-
-// stolen from xen/arch/x86/hvm/pmtimer.c
-static int vacpi_save(struct domain *d, hvm_domain_context_t *h)
-{
- struct vacpi *s = &d->arch.hvm_domain.vacpi;
- unsigned long delta;
- uint32_t msb = s->regs.tmr_val & TMR_VAL_MSB;
- struct hvm_hw_ia64_vacpi vacpi_save;
- int rc;
-
- stop_timer(&s->timer); //XXX
-
- spin_lock(&s->lock);
-
- /* Update the counter to the guest's current time. We always save
- * with the domain paused, so the saved time should be after the
- * last_gtime, but just in case, make sure we only go forwards */
-
- // XXX NOW() should be the time at which the domain was paused
- delta = NOW() - s->last_gtime;
- delta = ((delta >> 8) * ((FREQUENCE_PMTIMER << 32) / SECONDS(1))) >> 24;
- if ( delta < 1UL<<31 )
- s->regs.tmr_val += delta;
- if ( (s->regs.tmr_val & TMR_VAL_MSB) != msb )
- s->regs.pm1a_sts |= TMR_STS;
- /* No point in setting the SCI here because we'll already have saved the
- * IRQ and *PIC state; we'll fix it up when we restore the domain */
-
- vacpi_save.regs = s->regs;
- rc = hvm_save_entry(VACPI, 0, h, &vacpi_save);
-
- spin_unlock(&s->lock);
-
- pmt_timer_callback(d); // XXX This might change the domain state.
- return rc;
-}
-
-static int vacpi_load(struct domain *d, hvm_domain_context_t *h)
-{
- struct vacpi *s = &d->arch.hvm_domain.vacpi;
- struct hvm_hw_ia64_vacpi vacpi_load;
-
- /* Reload the registers */
- if ( hvm_load_entry(VACPI, h, &vacpi_load) )
- return -EINVAL;
-
- stop_timer(&s->timer);//XXX
-
- spin_lock(&s->lock);
-
- s->regs = vacpi_load.regs;
-
- /* Calculate future counter values from now. */
- // XXX last_gtime should be set when the domain is unpaused
- s->last_gtime = NOW();
-
- /* Set the SCI state from the registers */
- pmt_update_sci(d, s);
-
- spin_unlock(&s->lock);
-
- pmt_timer_callback(d); // XXX
- return 0;
-}
-
-HVM_REGISTER_SAVE_RESTORE(VACPI, vacpi_save, vacpi_load, 1, HVMSR_PER_DOM);
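
The nanosecond-to-tick conversion in pmt_update_time() and vacpi_save() is a fixed-point trick: conceptually ticks = delta_ns * FREQUENCE_PMTIMER / SECONDS(1), but the direct multiply would overflow 64 bits, so the scale factor is precomputed in 32.32 fixed point and the input is pre-shifted by 8 (the remaining 24 bits come off at the end). A hedged sketch (the helper name is illustrative):

    /* Sketch only: same arithmetic as the delta computation above. */
    static unsigned long ns_to_pmt_ticks(unsigned long delta_ns)
    {
        unsigned long scale = (FREQUENCE_PMTIMER << 32) / SECONDS(1);
        return ((delta_ns >> 8) * scale) >> 24; /* == delta * freq / 1e9 */
    }
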
diff --git a/xen/arch/ia64/vmx/viosapic.c b/xen/arch/ia64/vmx/viosapic.c
deleted file mode 100644
index 2efdb49154..0000000000
--- a/xen/arch/ia64/vmx/viosapic.c
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Copyright (C) 2001 MandrakeSoft S.A.
- *
- * MandrakeSoft S.A.
- * 43, rue d'Aboukir
- * 75002 Paris - France
- * http://www.linux-mandrake.com/
- * http://www.mandrakesoft.com/
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Yunhong Jiang <yunhong.jiang@intel.com>
- * Ported to xen by using virtual IRQ line.
- *
- * Copyright (C) 2007 VA Linux Systems Japan K.K.
- * Isaku Yamahata <yamahata at valinux co jp>
- * SMP support
- * xen save/restore support
- */
-
-#include <xen/config.h>
-#include <xen/types.h>
-#include <xen/mm.h>
-#include <xen/xmalloc.h>
-#include <xen/lib.h>
-#include <xen/errno.h>
-#include <public/hvm/ioreq.h>
-#include <asm/vlsapic.h>
-#include <asm/viosapic.h>
-#include <asm/current.h>
-#include <asm/event.h>
-#include <asm/hvm/support.h>
-#include <public/hvm/save.h>
-
-static void viosapic_deliver(struct viosapic *viosapic, int irq)
-{
- uint16_t dest = viosapic->redirtbl[irq].fields.dest_id;
- uint8_t delivery_mode = viosapic->redirtbl[irq].fields.delivery_mode;
- uint8_t vector = viosapic->redirtbl[irq].fields.vector;
-
- ASSERT(spin_is_locked(&viosapic->lock));
-
- if (vlsapic_deliver_int(viosapic_domain (viosapic),
- dest, delivery_mode, vector) < 0)
- gdprintk(XENLOG_WARNING,
- "viosapic: can't deliver int %u to %u (dm=%u)\n",
- vector, dest, delivery_mode);
-}
-
-
-static int iosapic_get_highest_irq(struct viosapic *viosapic)
-{
- uint64_t irqs = viosapic->irr & ~viosapic->isr ;
-
- if (irqs)
- return ia64_fls(irqs);
-
- return -1;
-}
-
-
-/* XXX If level interrupt, use vector->irq table for performance */
-static int get_redir_num(struct viosapic *viosapic, int vector)
-{
- int i;
-
- ASSERT(spin_is_locked(&viosapic->lock));
- for ( i = 0; i < VIOSAPIC_NUM_PINS; i++ )
- if ( viosapic->redirtbl[i].fields.vector == vector )
- return i;
-
- return -1;
-}
-
-
-static void service_iosapic(struct viosapic *viosapic)
-{
- int irq;
-
- while ( (irq = iosapic_get_highest_irq(viosapic)) != -1 )
- {
- if ( viosapic->redirtbl[irq].fields.trig_mode == SAPIC_LEVEL )
- viosapic->isr |= (1UL << irq);
-
- viosapic_deliver(viosapic, irq);
-
- viosapic->irr &= ~(1UL << irq);
- }
-}
-
-
-static void viosapic_update_EOI(struct viosapic *viosapic, int vector)
-{
- int redir_num;
-
- spin_lock(&viosapic->lock);
- if ( (redir_num = get_redir_num(viosapic, vector)) == -1 )
- {
- spin_unlock(&viosapic->lock);
- gdprintk(XENLOG_WARNING, "Can't find redir item for %d EOI\n", vector);
- return;
- }
-
- if ( !test_and_clear_bit(redir_num, &viosapic->isr) )
- {
- spin_unlock(&viosapic->lock);
- if ( viosapic->redirtbl[redir_num].fields.trig_mode == SAPIC_LEVEL )
- gdprintk(XENLOG_WARNING, "redir %d not set for %d EOI\n",
- redir_num, vector);
- return;
- }
- if ( iommu_enabled )
- {
- spin_unlock(&viosapic->lock);
- hvm_dpci_eoi(current->domain, redir_num, &viosapic->redirtbl[redir_num]);
- spin_lock(&viosapic->lock);
- }
-
- service_iosapic(viosapic);
- spin_unlock(&viosapic->lock);
-}
-
-
-static unsigned long viosapic_read_indirect(struct viosapic *viosapic,
- unsigned long addr,
- unsigned long length)
-{
- unsigned long result = 0;
-
- switch ( viosapic->ioregsel )
- {
- case VIOSAPIC_VERSION:
- result = ((((VIOSAPIC_NUM_PINS - 1) & 0xff) << 16)
- | (VIOSAPIC_VERSION_ID & 0xff));
- break;
-
- default:
- {
- /* ioregsel might be written at the same time. Copy it before use. */
- uint32_t ioregsel = viosapic->ioregsel;
- uint32_t redir_index;
- uint64_t redir_content;
-
- redir_index = (ioregsel - 0x10) >> 1;
- if ( redir_index >= VIOSAPIC_NUM_PINS )
- {
- gdprintk(XENLOG_WARNING, "viosapic_read_indirect:undefined "
- "ioregsel %x\n", ioregsel);
- break;
- }
-
- redir_content = viosapic->redirtbl[redir_index].bits;
- result = (ioregsel & 0x1) ?
- (redir_content >> 32) & 0xffffffff :
- redir_content & 0xffffffff;
- break;
- }
- }
-
- return result;
-}
-
-
-unsigned long viosapic_read(struct vcpu *v,
- unsigned long addr,
- unsigned long length)
-{
- struct viosapic *viosapic = vcpu_viosapic(v);
- uint32_t result;
-
- addr &= 0xff;
-
- switch ( addr )
- {
- case VIOSAPIC_REG_SELECT:
- result = viosapic->ioregsel;
- break;
-
- case VIOSAPIC_WINDOW:
- result = viosapic_read_indirect(viosapic, addr, length);
- break;
-
- default:
- result = 0;
- break;
- }
-
- return result;
-}
-
-
-static void viosapic_write_indirect(struct viosapic *viosapic,
- unsigned long addr,
- unsigned long length,
- unsigned long val)
-{
- switch ( viosapic->ioregsel )
- {
- case VIOSAPIC_VERSION:
- /* Writes are ignored. */
- break;
-
- default:
- {
- /* ioregsel might be written at the same time. Copy it before use. */
- uint32_t ioregsel = viosapic->ioregsel;
- uint32_t redir_index;
- uint64_t redir_content;
-
- redir_index = (ioregsel - 0x10) >> 1;
- if ( redir_index >= VIOSAPIC_NUM_PINS )
- {
- gdprintk(XENLOG_WARNING, "viosapic_write_indirect "
- "error register %x\n", viosapic->ioregsel);
- break;
- }
-
- spin_lock(&viosapic->lock);
- redir_content = viosapic->redirtbl[redir_index].bits;
-
- if ( ioregsel & 0x1 )
- {
- redir_content = (((uint64_t)val & 0xffffffff) << 32) |
- (redir_content & 0xffffffff);
- }
- else
- {
- redir_content = ((redir_content >> 32) << 32) |
- (val & 0xffffffff);
- }
- viosapic->redirtbl[redir_index].bits = redir_content;
- spin_unlock(&viosapic->lock);
- break;
- }
- } /* switch */
-}
-
-
-void viosapic_write(struct vcpu *v,
- unsigned long addr,
- unsigned long length,
- unsigned long val)
-{
- struct viosapic *viosapic = vcpu_viosapic(v);
-
- addr &= 0xff;
-
- switch ( addr )
- {
- case VIOSAPIC_REG_SELECT:
- viosapic->ioregsel = val;
- break;
-
- case VIOSAPIC_WINDOW:
- viosapic_write_indirect(viosapic, addr, length, val);
- break;
-
- case VIOSAPIC_EOI:
- viosapic_update_EOI(viosapic, val);
- break;
-
- default:
- break;
- }
-}
-
-
-static void viosapic_reset(struct viosapic *viosapic)
-{
- int i;
-
- memset(viosapic, 0, sizeof(*viosapic));
-
- for ( i = 0; i < VIOSAPIC_NUM_PINS; i++ )
- {
- viosapic->redirtbl[i].fields.mask = 0x1;
- }
- spin_lock_init(&viosapic->lock);
-}
-
-void viosapic_set_irq(struct domain *d, int irq, int level)
-{
- struct viosapic *viosapic = domain_viosapic(d);
- uint64_t bit;
-
- spin_lock(&viosapic->lock);
- if ( (irq < 0) || (irq >= VIOSAPIC_NUM_PINS) )
- goto out;
-
- if ( viosapic->redirtbl[irq].fields.mask )
- goto out;
-
- bit = 1UL << irq;
- if ( viosapic->redirtbl[irq].fields.trig_mode == SAPIC_LEVEL )
- {
- if ( level )
- viosapic->irr |= bit;
- else
- viosapic->irr &= ~bit;
- }
- else
- {
- if ( level )
- /* XXX No irr clear for edge interrupt */
- viosapic->irr |= bit;
- }
-
- service_iosapic(viosapic);
-out:
- spin_unlock(&viosapic->lock);
-}
-
-void viosapic_set_pci_irq(struct domain *d, int device, int intx, int level)
-{
- int irq;
- irq = hvm_pci_intx_gsi(device, intx);
-
- viosapic_set_irq(d, irq, level);
-}
-
-void viosapic_init(struct domain *d)
-{
- struct viosapic *viosapic = domain_viosapic(d);
-
- viosapic_reset(viosapic);
-
- viosapic->lowest_vcpu = NULL;
-
- viosapic->base_address = VIOSAPIC_DEFAULT_BASE_ADDRESS;
-}
-
-#define VIOSAPIC_INVALID_VCPU_ID (-1UL)
-static int viosapic_save(struct domain *d, hvm_domain_context_t *h)
-{
- struct viosapic *viosapic = domain_viosapic(d);
- struct hvm_hw_ia64_viosapic viosapic_save;
- int i;
-
- memset(&viosapic_save, 0, sizeof(viosapic_save));
-
- spin_lock(&viosapic->lock);
- viosapic_save.irr = viosapic->irr;
- viosapic_save.isr = viosapic->isr;
- viosapic_save.ioregsel = viosapic->ioregsel;
- if (viosapic->lowest_vcpu != NULL)
- viosapic_save.lowest_vcpu_id = viosapic->lowest_vcpu->vcpu_id;
- else
- viosapic_save.lowest_vcpu_id = VIOSAPIC_INVALID_VCPU_ID;
- viosapic_save.base_address = viosapic->base_address;
-
- for (i = 0; i < VIOSAPIC_NUM_PINS; i++)
- viosapic_save.redirtbl[i] = viosapic->redirtbl[i];
- spin_unlock(&viosapic->lock);
-
- return hvm_save_entry(VIOSAPIC, 0, h, &viosapic_save);
-}
-
-static int viosapic_load(struct domain *d, hvm_domain_context_t *h)
-{
- struct viosapic *viosapic = domain_viosapic(d);
- struct hvm_hw_ia64_viosapic viosapic_load;
- struct vcpu *lowest_vcpu;
- int i;
-
- if (hvm_load_entry(VIOSAPIC, h, &viosapic_load))
- return -EINVAL;
-
- lowest_vcpu = NULL;
- if (viosapic_load.lowest_vcpu_id < d->max_vcpus)
- lowest_vcpu = d->vcpu[viosapic_load.lowest_vcpu_id];
- else if (viosapic_load.lowest_vcpu_id != VIOSAPIC_INVALID_VCPU_ID)
- return -EINVAL;
-
- if (viosapic_load.base_address != VIOSAPIC_DEFAULT_BASE_ADDRESS)
- return -EINVAL;
-
- spin_lock(&viosapic->lock);
- viosapic->irr = viosapic_load.irr;
- viosapic->isr = viosapic_load.isr;
- viosapic->ioregsel = viosapic_load.ioregsel;
-
- viosapic->lowest_vcpu = lowest_vcpu;
-
- viosapic->base_address = viosapic_load.base_address;
-
- for (i = 0; i < VIOSAPIC_NUM_PINS; i++)
- viosapic->redirtbl[i] = viosapic_load.redirtbl[i];
-
- service_iosapic(viosapic); // XXX
- spin_unlock(&viosapic->lock);
-
- return 0;
-}
-
-HVM_REGISTER_SAVE_RESTORE(VIOSAPIC, viosapic_save, viosapic_load,
- 1, HVMSR_PER_DOM);
diff --git a/xen/arch/ia64/vmx/vlsapic.c b/xen/arch/ia64/vmx/vlsapic.c
deleted file mode 100644
index 39f59a7e8a..0000000000
--- a/xen/arch/ia64/vmx/vlsapic.c
+++ /dev/null
@@ -1,961 +0,0 @@
-
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vlsapic.c: virtual lsapic model including ITC timer.
- * Copyright (c) 2005, Intel Corporation.
- *
- * Copyright (c) 2007, Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * save/restore support
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- */
-
-#include <linux/sched.h>
-#include <public/xen.h>
-#include <asm/ia64_int.h>
-#include <asm/vcpu.h>
-#include <asm/regionreg.h>
-#include <asm/processor.h>
-#include <asm/delay.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/regs.h>
-#include <asm/gcc_intrin.h>
-#include <asm/vmx_mm_def.h>
-#include <asm/vmx.h>
-#include <asm/vmx_vpd.h>
-#include <asm/hw_irq.h>
-#include <asm/vmx_pal_vsa.h>
-#include <asm/kregs.h>
-#include <asm/vmx_platform.h>
-#include <asm/viosapic.h>
-#include <asm/vlsapic.h>
-#include <asm/vmx_phy_mode.h>
-#include <asm/linux/jiffies.h>
-#include <xen/domain.h>
-#include <asm/hvm/support.h>
-#include <public/hvm/save.h>
-#include <public/arch-ia64/hvm/memmap.h>
-
-#ifdef IPI_DEBUG
-#define IPI_DPRINTK(x...) printk(x)
-#else
-#define IPI_DPRINTK(x...)
-#endif
-
-//u64 fire_itc;
-//u64 fire_itc2;
-//u64 fire_itm;
-//u64 fire_itm2;
-/*
- * Update the checked last_itc.
- */
-
-extern void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
- u64 vector, REGS *regs);
-static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
-{
- vtm->last_itc = cur_itc;
-}
-
-/*
- * Next for vLSapic
- */
-
-#define NMI_VECTOR 2
-#define ExtINT_VECTOR 0
-#define NULL_VECTOR -1
-
-static void update_vhpi(VCPU *vcpu, int vec)
-{
- u64 vhpi;
-
- if (vec == NULL_VECTOR)
- vhpi = 0;
- else if (vec == NMI_VECTOR)
- vhpi = 32;
- else if (vec == ExtINT_VECTOR)
- vhpi = 16;
- else
- vhpi = vec >> 4;
-
- VCPU(vcpu,vhpi) = vhpi;
- // TODO: Add support for XENO
- if (VCPU(vcpu,vac).a_int) {
- vmx_vpd_pin(vcpu);
- ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
- (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
- vmx_vpd_unpin(vcpu);
- }
-}
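
The VHPI field written here is a compressed priority rather than a raw vector: NMI and ExtINT get the out-of-band values 32 and 16, an ordinary vector collapses to its 16-vector priority class, and 0 means nothing pending. The encoding in isolation (a restatement of the branches above, nothing new):

    #define NULL_VECTOR   -1
    #define ExtINT_VECTOR  0
    #define NMI_VECTOR     2

    static unsigned int vec_to_vhpi(int vec)
    {
        if (vec == NULL_VECTOR)
            return 0;        /* nothing pending */
        if (vec == NMI_VECTOR)
            return 32;       /* outranks every priority class */
        if (vec == ExtINT_VECTOR)
            return 16;
        return vec >> 4;     /* ordinary vectors: priority class 1..15 */
    }
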
-
-
-/*
- * May come from virtualization fault or
- * nested host interrupt.
- */
-static int vmx_vcpu_unpend_interrupt(VCPU *vcpu, uint8_t vector)
-{
- int ret;
-
- if (vector & ~0xff) {
- dprintk(XENLOG_WARNING, "vmx_vcpu_pend_interrupt: bad vector\n");
- return -1;
- }
-
- ret = test_and_clear_bit(vector, &VCPU(vcpu, irr[0]));
-
- if (ret) {
- vcpu->arch.irq_new_pending = 1;
- wmb();
- }
-
- return ret;
-}
-
-/*
- * ITC value as seen by the guest (host + offset + drift).
- */
-static uint64_t now_itc(vtime_t *vtm)
-{
- uint64_t guest_itc = vtm->vtm_offset + ia64_get_itc();
-
- if (guest_itc >= vtm->last_itc)
- return guest_itc;
- else
- /* guest ITC went backward due to LP switch */
- return vtm->last_itc;
-}
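
now_itc() derives guest time as the host ITC plus a per-domain offset and clamps the result against last_itc, so the guest never observes its clock running backwards after being rescheduled onto a physical CPU whose ITC lags. A sketch of that invariant, under hypothetical names:

    #include <stdint.h>

    struct vtm_model {
        uint64_t offset;   /* mirrors vtm_offset */
        uint64_t last;     /* mirrors last_itc (updated elsewhere) */
    };

    /* Guest ITC: monotonic even if host_itc jumps backwards. */
    static uint64_t model_now_itc(const struct vtm_model *t, uint64_t host_itc)
    {
        uint64_t guest = t->offset + host_itc;

        return guest >= t->last ? guest : t->last;
    }
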
-
-/*
- * Interval time components reset.
- */
-static void vtm_reset(VCPU *vcpu)
-{
- int i;
- u64 vtm_offset;
- VCPU *v;
- struct domain *d = vcpu->domain;
- vtime_t *vtm = &VMX(vcpu, vtm);
-
- if (vcpu->vcpu_id == 0) {
- vtm_offset = 0UL - ia64_get_itc();
- for (i = d->max_vcpus - 1; i >= 0; i--) {
- if ((v = d->vcpu[i]) != NULL) {
- VMX(v, vtm).vtm_offset = vtm_offset;
- VMX(v, vtm).last_itc = 0;
- }
- }
- }
- vtm->vtm_local_drift = 0;
- VCPU(vcpu, itm) = 0;
- VCPU(vcpu, itv) = 0x10000;
- vtm->last_itc = 0;
-}
-
-/* callback function when vtm_timer expires */
-static void vtm_timer_fn(void *data)
-{
- VCPU *vcpu = data;
- vtime_t *vtm = &VMX(vcpu, vtm);
- u64 vitv;
-
- vitv = VCPU(vcpu, itv);
- if (!ITV_IRQ_MASK(vitv)) {
- vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(vitv));
- vcpu_unblock(vcpu);
- } else
- vtm->pending = 1;
-
- /*
- * "+ 1" is for fixing oops message at timer_interrupt() on VTI guest.
- * If oops checking condition changed to timer_after_eq() on VTI guest,
- * this parameter should be erased.
- */
- update_last_itc(vtm, VCPU(vcpu, itm) + 1); // update vITC
-}
-
-void vtm_init(VCPU *vcpu)
-{
- vtime_t *vtm;
- uint64_t itc_freq;
-
- vtm = &VMX(vcpu, vtm);
-
- itc_freq = local_cpu_data->itc_freq;
- vtm->cfg_max_jump = itc_freq * MAX_JUMP_STEP / 1000;
- vtm->cfg_min_grun = itc_freq * MIN_GUEST_RUNNING_TIME / 1000;
- init_timer(&vtm->vtm_timer, vtm_timer_fn, vcpu, vcpu->processor);
- vtm_reset(vcpu);
-}
-
-/*
- * Action when the guest reads the ITC.
- */
-uint64_t vtm_get_itc(VCPU *vcpu)
-{
- uint64_t guest_itc;
- vtime_t *vtm = &VMX(vcpu, vtm);
-
- guest_itc = now_itc(vtm);
- return guest_itc;
-}
-
-
-void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
-{
- int i;
- uint64_t vitm, vtm_offset;
- vtime_t *vtm;
- VCPU *v;
- struct domain *d = vcpu->domain;
-
- vitm = VCPU(vcpu, itm);
- vtm = &VMX(vcpu, vtm);
- if (vcpu->vcpu_id == 0) {
- vtm_offset = new_itc - ia64_get_itc();
- for (i = d->max_vcpus - 1; i >= 0; i--) {
- if ((v = d->vcpu[i]) != NULL) {
- VMX(v, vtm).vtm_offset = vtm_offset;
- VMX(v, vtm).last_itc = 0;
- }
- }
- }
- vtm->last_itc = 0;
- if (vitm <= new_itc)
- stop_timer(&vtm->vtm_timer);
- else
- vtm_set_itm(vcpu, vitm);
-}
-
-
-extern u64 cycle_to_ns(u64 cycle);
-
-
-void vtm_set_itm(VCPU *vcpu, uint64_t val)
-{
- vtime_t *vtm;
- uint64_t vitv, cur_itc, expires;
-
- vitv = VCPU(vcpu, itv);
- vtm = &VMX(vcpu, vtm);
- VCPU(vcpu, itm) = val;
- if (val > vtm->last_itc) {
- cur_itc = now_itc(vtm);
- if (time_before(val, cur_itc))
- val = cur_itc;
- expires = NOW() + cycle_to_ns(val-cur_itc);
- vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
- set_timer(&vtm->vtm_timer, expires);
- }else{
- stop_timer(&vtm->vtm_timer);
- }
-}
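
cycle_to_ns() is only declared extern above; what it must compute is cycles * 10^9 / itc_freq without overflowing 64 bits for large cycle counts. A plausible sketch of such a conversion (an assumption about its shape, not the actual Xen implementation):

    #include <stdint.h>

    /* Hypothetical cycles -> ns conversion; splitting out the remainder
     * avoids the overflow of a naive cycles * 1000000000. */
    static uint64_t model_cycle_to_ns(uint64_t cycles, uint64_t itc_freq)
    {
        uint64_t whole = cycles / itc_freq;
        uint64_t rem   = cycles % itc_freq;

        return whole * 1000000000ULL + rem * 1000000000ULL / itc_freq;
    }
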
-
-
-void vtm_set_itv(VCPU *vcpu, uint64_t val)
-{
- vtime_t *vtm = &VMX(vcpu, vtm);
-
- VCPU(vcpu, itv) = val;
-
- if (!ITV_IRQ_MASK(val) && vtm->pending) {
- vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
- vtm->pending = 0;
- }
-}
-
-
-void vlsapic_reset(VCPU *vcpu)
-{
- int i;
-
- VCPU(vcpu, lid) = VCPU_LID(vcpu);
- VCPU(vcpu, ivr) = 0;
- VCPU(vcpu,tpr) = 0x10000;
- VCPU(vcpu, eoi) = 0;
- VCPU(vcpu, irr[0]) = 0;
- VCPU(vcpu, irr[1]) = 0;
- VCPU(vcpu, irr[2]) = 0;
- VCPU(vcpu, irr[3]) = 0;
- VCPU(vcpu, pmv) = 0x10000;
- VCPU(vcpu, cmcv) = 0x10000;
- VCPU(vcpu, lrr0) = 0x10000; // default reset value?
- VCPU(vcpu, lrr1) = 0x10000; // default reset value?
- update_vhpi(vcpu, NULL_VECTOR);
- VLSAPIC_XTP(vcpu) = 0x80; // disabled
- for ( i=0; i<4; i++) {
- VLSAPIC_INSVC(vcpu,i) = 0;
- }
-
- dprintk(XENLOG_INFO, "VLSAPIC inservice base=%p\n", &VLSAPIC_INSVC(vcpu,0) );
-}
-
-/*
- * Find the highest set bit in 4 words (a 256-bit bitmap).
- *
- * return 0-255: index of the highest set bit.
- *        NULL_VECTOR (-1): no bit set.
- */
-static __inline__ int highest_bits(uint64_t *dat)
-{
- uint64_t bits, bitnum;
- int i;
-
- /* loop for all 256 bits */
- for ( i=3; i >= 0 ; i -- ) {
- bits = dat[i];
- if ( bits ) {
- bitnum = ia64_fls(bits);
- return i*64+bitnum;
- }
- }
- return NULL_VECTOR;
-}
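
ia64_fls() yields the bit index of the most-significant set bit of a non-zero word. For reference, the same 256-bit scan written with a portable GCC builtin (hypothetical helpers, not part of this file):

    #include <stdint.h>

    /* Portable stand-in for ia64_fls(); bits must be non-zero. */
    static int model_fls64(uint64_t bits)
    {
        return 63 - __builtin_clzll(bits);
    }

    /* Words are scanned 3..0, so bit 255 beats bit 0. */
    static int model_highest_bits(const uint64_t dat[4])
    {
        int i;

        for (i = 3; i >= 0; i--)
            if (dat[i])
                return i * 64 + model_fls64(dat[i]);
        return -1;  /* NULL_VECTOR */
    }
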
-
-/*
- * Return 0-255 for the highest pending irq,
- * or NULL_VECTOR when none is pending.
- */
-static int highest_pending_irq(VCPU *vcpu)
-{
- if ( VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
- if ( VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
- return highest_bits(&VCPU(vcpu, irr[0]));
-}
-
-static int highest_inservice_irq(VCPU *vcpu)
-{
- if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
- if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
- return highest_bits(&(VLSAPIC_INSVC(vcpu, 0)));
-}
-
-/*
- * Return whether the pending irq has higher priority than the in-service one.
- *
- */
-static int is_higher_irq(int pending, int inservice)
-{
- return ( (pending > inservice) ||
- ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) );
-}
-
-static int is_higher_class(int pending, int mic)
-{
- return ( (pending >> 4) > mic );
-}
-
-#define IRQ_NO_MASKED 0
-#define IRQ_MASKED_BY_VTPR 1
-#define IRQ_MASKED_BY_INSVC 2 // masked by inservice IRQ
-
-/* See Table 5-8 in SDM vol2 for the definition */
-static int
-_xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
-{
- tpr_t vtpr;
-
- vtpr.val = VCPU(vcpu, tpr);
-
- if ( h_inservice == NMI_VECTOR ) {
- return IRQ_MASKED_BY_INSVC;
- }
- if ( h_pending == NMI_VECTOR ) {
- // Non Maskable Interrupt
- return IRQ_NO_MASKED;
- }
- if ( h_inservice == ExtINT_VECTOR ) {
- return IRQ_MASKED_BY_INSVC;
- }
-
- if ( h_pending == ExtINT_VECTOR ) {
- if ( vtpr.mmi ) {
- // mask all external IRQ
- return IRQ_MASKED_BY_VTPR;
- }
- else {
- return IRQ_NO_MASKED;
- }
- }
-
- if ( is_higher_irq(h_pending, h_inservice) ) {
- if ( is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)) ) {
- return IRQ_NO_MASKED;
- }
- else {
- return IRQ_MASKED_BY_VTPR;
- }
- }
- else {
- return IRQ_MASKED_BY_INSVC;
- }
-}
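
For the common case the whole table reduces to one comparison: the pending vector's priority class (vec >> 4) must exceed tpr.mic, with tpr.mmi folded in as a fifth bit so that mmi=1 masks every class. A tiny self-check of just that comparison (hypothetical helper):

    #include <assert.h>

    static int class_unmasked(int vec, int mic, int mmi)
    {
        return (vec >> 4) > (mic + (mmi << 4));
    }

    int main(void)
    {
        assert( class_unmasked(0x59, 4, 0));  /* class 5 > mic 4: delivered */
        assert(!class_unmasked(0x59, 5, 0));  /* class 5 <= mic 5: masked */
        assert(!class_unmasked(0x59, 0, 1));  /* mmi set: everything masked */
        return 0;
    }
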
-
-static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
-{
- int mask;
-
- mask = _xirq_masked(vcpu, h_pending, h_inservice);
- return mask;
-}
-
-
-/*
- * May come from virtualization fault or
- * nested host interrupt.
- */
-int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector)
-{
- int ret;
-
- if (vector & ~0xff) {
- gdprintk(XENLOG_INFO, "vmx_vcpu_pend_interrupt: bad vector\n");
- return -1;
- }
- ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
-
- if (!ret) {
- vcpu->arch.irq_new_pending = 1;
- wmb();
- }
-
- return ret;
-}
-
-
-/*
- * Add a batch of pending interrupts.
- * The interrupt sources are contained in pend_irr[0-3],
- * with each bit standing for one interrupt.
- */
-void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, u64 *pend_irr)
-{
- uint64_t spsr;
- int i;
-
- local_irq_save(spsr);
- for (i=0 ; i<4; i++ ) {
- VCPU(vcpu,irr[i]) |= pend_irr[i];
- }
- local_irq_restore(spsr);
- vcpu->arch.irq_new_pending = 1;
- wmb();
-}
-
-/*
- * If the new pending interrupt is enabled and not masked, we directly inject
- * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when
- * the interrupt becomes unmasked, it gets injected.
- * RETURN:
- * the highest unmasked interrupt.
- *
- * Optimization: We defer setting the VHPI until the EOI time, if a higher
- * priority interrupt is in-service. The idea is to reduce the
- * number of unnecessary calls to inject_vhpi.
- */
-int vmx_check_pending_irq(VCPU *vcpu)
-{
- int mask, h_pending, h_inservice;
- uint64_t isr;
- IA64_PSR vpsr;
- REGS *regs=vcpu_regs(vcpu);
- h_pending = highest_pending_irq(vcpu);
- if ( h_pending == NULL_VECTOR ) {
- update_vhpi(vcpu, NULL_VECTOR);
- h_pending = SPURIOUS_VECTOR;
- goto chk_irq_exit;
- }
- h_inservice = highest_inservice_irq(vcpu);
-
- vpsr.val = VCPU(vcpu, vpsr);
- mask = irq_masked(vcpu, h_pending, h_inservice);
- if ( vpsr.i && IRQ_NO_MASKED == mask ) {
- isr = vpsr.val & IA64_PSR_RI;
- if ( !vpsr.ic )
- panic_domain(regs,"Interrupt when IC=0\n");
- update_vhpi(vcpu, h_pending);
- vmx_reflect_interruption(0, isr, 0, 12, regs); // EXT IRQ
- } else if (mask == IRQ_MASKED_BY_INSVC) {
- if (VCPU(vcpu, vhpi))
- update_vhpi(vcpu, NULL_VECTOR);
- }
- else {
- // masked by vpsr.i or vtpr.
- update_vhpi(vcpu,h_pending);
- }
-
-chk_irq_exit:
- return h_pending;
-}
-
-/*
- * Set an INIT interruption request for vcpu[0] of the target domain.
- * The INIT interruption is injected into each vcpu by guest firmware.
- */
-void vmx_pend_pal_init(struct domain *d)
-{
- VCPU *vcpu;
-
- vcpu = d->vcpu[0];
- vcpu->arch.arch_vmx.pal_init_pending = 1;
-}
-
-/*
- * Only coming from virtualization fault.
- */
-void guest_write_eoi(VCPU *vcpu)
-{
- int vec;
-
- vec = highest_inservice_irq(vcpu);
- if (vec == NULL_VECTOR) {
- gdprintk(XENLOG_WARNING, "vcpu(%d): Wrong vector to EOI\n",
- vcpu->vcpu_id);
- return;
- }
- VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
- VCPU(vcpu, eoi)=0; // overwrite the data
- vcpu->arch.irq_new_pending=1;
- wmb();
-}
-
-int is_unmasked_irq(VCPU *vcpu)
-{
- int h_pending, h_inservice;
-
- h_pending = highest_pending_irq(vcpu);
- h_inservice = highest_inservice_irq(vcpu);
- if ( h_pending == NULL_VECTOR ||
- irq_masked(vcpu, h_pending, h_inservice) != IRQ_NO_MASKED ) {
- return 0;
- }
- else
- return 1;
-}
-
-uint64_t guest_read_vivr(VCPU *vcpu)
-{
- int vec, h_inservice, mask;
- vec = highest_pending_irq(vcpu);
- h_inservice = highest_inservice_irq(vcpu);
- mask = irq_masked(vcpu, vec, h_inservice);
- if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
- if (VCPU(vcpu, vhpi))
- update_vhpi(vcpu, NULL_VECTOR);
- return IA64_SPURIOUS_INT_VECTOR;
- }
- if (mask == IRQ_MASKED_BY_VTPR) {
- update_vhpi(vcpu, vec);
- return IA64_SPURIOUS_INT_VECTOR;
- }
- VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
- vmx_vcpu_unpend_interrupt(vcpu, vec);
- return (uint64_t)vec;
-}
-
-static void generate_exirq(VCPU *vcpu)
-{
- IA64_PSR vpsr;
- uint64_t isr;
- REGS *regs=vcpu_regs(vcpu);
- vpsr.val = VCPU(vcpu, vpsr);
- isr = vpsr.val & IA64_PSR_RI;
- if ( !vpsr.ic )
- panic_domain(regs,"Interrupt when IC=0\n");
- vmx_reflect_interruption(0,isr,0, 12, regs); // EXT IRQ
-}
-
-void vhpi_detection(VCPU *vcpu)
-{
- uint64_t threshold,vhpi;
- tpr_t vtpr;
- IA64_PSR vpsr;
- vpsr.val = VCPU(vcpu, vpsr);
- vtpr.val = VCPU(vcpu, tpr);
-
- threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
- vhpi = VCPU(vcpu,vhpi);
- if ( vhpi > threshold ) {
- // interrupt activated
- generate_exirq (vcpu);
- }
-}
-
-void vmx_vexirq(VCPU *vcpu)
-{
- generate_exirq (vcpu);
-}
-
-struct vcpu *lid_to_vcpu(struct domain *d, uint16_t dest)
-{
- int id = dest >> 8;
-
- /* Fast lookup: assume EID=0 and ID=vcpu_id. */
- if ((dest & 0xff) == 0 && id < d->max_vcpus)
- return d->vcpu[id];
- return NULL;
-}
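
The 16-bit dest packs the LID's ID byte above its EID byte, and the fast path relies on the EID=0, ID==vcpu_id layout that vlsapic_reset() establishes through VCPU_LID(). Hypothetical pack/unpack helpers for that layout:

    #include <stdint.h>

    /* dest layout assumed above: ID in bits 15:8, EID in bits 7:0. */
    static inline uint16_t lid_dest(uint8_t id, uint8_t eid)
    {
        return (uint16_t)(id << 8) | eid;
    }

    static inline uint8_t dest_id(uint16_t dest)  { return dest >> 8; }
    static inline uint8_t dest_eid(uint16_t dest) { return dest & 0xff; }
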
-
-/*
- * To inject an INIT into the guest, we must set the IP to the PAL_INIT
- * entry and clear psr bits to switch to physical mode.
- */
-#define PSR_SET_BITS (IA64_PSR_DT | IA64_PSR_IT | IA64_PSR_RT | \
- IA64_PSR_IC | IA64_PSR_RI | IA64_PSR_I | IA64_PSR_CPL)
-
-static void vmx_inject_guest_pal_init(VCPU *vcpu)
-{
- REGS *regs = vcpu_regs(vcpu);
- uint64_t psr = vmx_vcpu_get_psr(vcpu);
-
- regs->cr_iip = PAL_INIT_ENTRY;
-
- psr = psr & ~PSR_SET_BITS;
- vmx_vcpu_set_psr(vcpu, psr);
-}
-
-
-/*
- * Deliver an interrupt to a vcpu. (Only UP virtual platform is supported now.)
- * dm: delivery mode.
- * vector: interrupt vector.
- */
-static int vcpu_deliver_int(VCPU *vcpu, uint64_t dm, uint64_t vector)
-{
- int running = vcpu->is_running;
-
- IPI_DPRINTK("deliver_int %lx %lx\n", dm, vector);
-
- switch (dm) {
- case SAPIC_FIXED: // INT
- vmx_vcpu_pend_interrupt(vcpu, vector);
- break;
- case SAPIC_LOWEST_PRIORITY:
- {
- struct vcpu *lowest = vcpu_viosapic(vcpu)->lowest_vcpu;
-
- if (lowest == NULL)
- lowest = vcpu;
- vmx_vcpu_pend_interrupt(lowest, vector);
- break;
- }
- case SAPIC_PMI:
- // TODO -- inject guest PMI
- panic_domain(NULL, "Inject guest PMI!\n");
- break;
- case SAPIC_NMI:
- vmx_vcpu_pend_interrupt(vcpu, 2);
- break;
- case SAPIC_INIT:
- vmx_inject_guest_pal_init(vcpu);
- break;
- case SAPIC_EXTINT: // ExtINT
- vmx_vcpu_pend_interrupt(vcpu, 0);
- break;
- default:
- return -EINVAL;
- }
-
- /* Kick vcpu. */
- vcpu_unblock(vcpu);
- if (running)
- smp_send_event_check_cpu(vcpu->processor);
-
- return 0;
-}
-
-int vlsapic_deliver_int(struct domain *d,
- uint16_t dest, uint64_t dm, uint64_t vector)
-{
- VCPU *vcpu;
-
- vcpu = lid_to_vcpu(d, dest);
- if (vcpu == NULL)
- return -ESRCH;
-
- if (!vcpu->is_initialised || test_bit(_VPF_down, &vcpu->pause_flags))
- return -ENOEXEC;
-
- return vcpu_deliver_int (vcpu, dm, vector);
-}
-
-/*
- * Deliver the INIT interruption to guest.
- */
-void deliver_pal_init(VCPU *vcpu)
-{
- vcpu_deliver_int(vcpu, SAPIC_INIT, 0);
-}
-
-/*
- * Execute a write to the IPI space.
- */
-static void vlsapic_write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
-{
- VCPU *targ;
- struct domain *d = vcpu->domain;
-
- targ = lid_to_vcpu(vcpu->domain,
- (((ipi_a_t)addr).id << 8) | ((ipi_a_t)addr).eid);
- if (targ == NULL)
- panic_domain(NULL, "Unknown IPI cpu\n");
-
- if (!targ->is_initialised ||
- test_bit(_VPF_down, &targ->pause_flags)) {
-
- struct pt_regs *targ_regs = vcpu_regs(targ);
-
- if (arch_set_info_guest(targ, NULL) != 0) {
- printk("arch_boot_vcpu: failure\n");
- return;
- }
- /* First or next rendez-vous: set registers. */
- vcpu_init_regs(targ);
- targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip;
- targ_regs->r1 = d->arch.sal_data->boot_rdv_r1;
-
- if (test_and_clear_bit(_VPF_down,&targ->pause_flags)) {
- vcpu_wake(targ);
- printk(XENLOG_DEBUG "arch_boot_vcpu: vcpu %d awaken %016lx!\n",
- targ->vcpu_id, targ_regs->cr_iip);
- } else {
- printk("arch_boot_vcpu: huh, already awake!");
- }
- } else {
- if (((ipi_d_t)value).dm == SAPIC_LOWEST_PRIORITY ||
- vcpu_deliver_int(targ, ((ipi_d_t)value).dm,
- ((ipi_d_t)value).vector) < 0)
- panic_domain(NULL, "Deliver reserved interrupt!\n");
- }
- return;
-}
-
-
-unsigned long vlsapic_read(struct vcpu *v,
- unsigned long addr,
- unsigned long length)
-{
- uint64_t result = 0;
-
- addr &= (PIB_SIZE - 1);
-
- switch (addr) {
- case PIB_OFST_INTA:
- if (length == 1) // 1 byte load
- ; // There is no i8259, there is no INTA access
- else
- panic_domain(NULL,"Undefined read on PIB INTA\n");
-
- break;
- case PIB_OFST_XTP:
- if (length == 1) {
- result = VLSAPIC_XTP(v);
- // printk("read xtp %lx\n", result);
- } else {
- panic_domain(NULL, "Undefined read on PIB XTP\n");
- }
- break;
- default:
- if (PIB_LOW_HALF(addr)) { // lower half
- if (length != 8 )
- panic_domain(NULL, "Undefined IPI-LHF read!\n");
- else
- IPI_DPRINTK("IPI-LHF read %lx\n", pib_off);
- } else { // upper half
- IPI_DPRINTK("IPI-UHF read %lx\n", addr);
- }
- break;
- }
- return result;
-}
-
-static void vlsapic_write_xtp(struct vcpu *v, uint8_t val)
-{
- struct viosapic * viosapic;
- struct vcpu *lvcpu, *vcpu;
- viosapic = vcpu_viosapic(v);
-
- spin_lock(&viosapic->lock);
- lvcpu = viosapic->lowest_vcpu;
- VLSAPIC_XTP(v) = val;
-
- for_each_vcpu(v->domain, vcpu) {
- if (lvcpu == NULL || VLSAPIC_XTP(lvcpu) > VLSAPIC_XTP(vcpu))
- lvcpu = vcpu;
- }
-
- if (VLSAPIC_XTP(lvcpu) & 0x80) // Disabled
- lvcpu = NULL;
-
- viosapic->lowest_vcpu = lvcpu;
- spin_unlock(&viosapic->lock);
-}
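
XTP is an external-task-priority hint: numerically smaller means more willing to take interrupts, and 0x80 (the value vlsapic_reset() installs) means opted out. The loop keeps whichever vCPU advertises the smallest XTP, and ends with no target when even the minimum is disabled. The same arbitration as a stand-alone sketch (hypothetical names):

    #include <stdint.h>

    #define XTP_DISABLED 0x80  /* values >= 0x80 opt the vCPU out */

    /* Returns the index of the lowest-XTP vCPU, or -1 if all disabled. */
    static int model_pick_lowest(const uint8_t *xtp, int nr_vcpus)
    {
        int i, best = 0;

        for (i = 1; i < nr_vcpus; i++)
            if (xtp[best] > xtp[i])
                best = i;

        return (xtp[best] & XTP_DISABLED) ? -1 : best;
    }
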
-
-void vlsapic_write(struct vcpu *v,
- unsigned long addr,
- unsigned long length,
- unsigned long val)
-{
- addr &= (PIB_SIZE - 1);
-
- switch (addr) {
- case PIB_OFST_INTA:
- panic_domain(NULL, "Undefined write on PIB INTA\n");
- break;
- case PIB_OFST_XTP:
- if (length == 1) {
- // printk("write xtp %lx\n", val);
- vlsapic_write_xtp(v, val);
- } else {
- panic_domain(NULL, "Undefined write on PIB XTP\n");
- }
- break;
- default:
- if (PIB_LOW_HALF(addr)) { // lower half
- if (length != 8)
- panic_domain(NULL, "Undefined IPI-LHF write with size %ld!\n",
- length);
- else
- vlsapic_write_ipi(v, addr, val);
- }
- else { // upper half
- // printk("IPI-UHF write %lx\n",addr);
- panic_domain(NULL, "No support for SM-VP yet\n");
- }
- break;
- }
-}
-
-static int vlsapic_save(struct domain *d, hvm_domain_context_t *h)
-{
- struct vcpu *v;
-
- for_each_vcpu(d, v) {
- struct hvm_hw_ia64_vlsapic vlsapic;
- int i;
-
- if (test_bit(_VPF_down, &v->pause_flags))
- continue;
-
- memset(&vlsapic, 0, sizeof(vlsapic));
- for (i = 0; i < 4; i++)
- vlsapic.insvc[i] = VLSAPIC_INSVC(v,i);
-
- vlsapic.vhpi = VCPU(v, vhpi);
- vlsapic.xtp = VLSAPIC_XTP(v);
- vlsapic.pal_init_pending = v->arch.arch_vmx.pal_init_pending;
-
- if (hvm_save_entry(VLSAPIC, v->vcpu_id, h, &vlsapic))
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vlsapic_load(struct domain *d, hvm_domain_context_t *h)
-{
- uint16_t vcpuid;
- struct vcpu *v;
- struct hvm_hw_ia64_vlsapic vlsapic;
- int i;
-
- vcpuid = hvm_load_instance(h);
- if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
- gdprintk(XENLOG_ERR,
- "%s: domain has no vlsapic %u\n", __func__, vcpuid);
- return -EINVAL;
- }
-
- if (hvm_load_entry(VLSAPIC, h, &vlsapic) != 0)
- return -EINVAL;
-
- for (i = 0; i < 4; i++)
- VLSAPIC_INSVC(v,i) = vlsapic.insvc[i];
-
- VCPU(v, vhpi) = vlsapic.vhpi;
- VLSAPIC_XTP(v) = vlsapic.xtp;
- v->arch.arch_vmx.pal_init_pending = vlsapic.pal_init_pending;
- v->arch.irq_new_pending = 1; /* to force checking irq */
-
- return 0;
-}
-
-HVM_REGISTER_SAVE_RESTORE(VLSAPIC, vlsapic_save, vlsapic_load,
- 1, HVMSR_PER_VCPU);
-
-static int vtime_save(struct domain *d, hvm_domain_context_t *h)
-{
- struct vcpu *v;
-
- for_each_vcpu(d, v) {
- vtime_t *vtm = &VMX(v, vtm);
- struct hvm_hw_ia64_vtime vtime;
-
- if (test_bit(_VPF_down, &v->pause_flags))
- continue;
-
- stop_timer(&vtm->vtm_timer);//XXX should wait for callback not running.
-
- memset(&vtime, 0, sizeof(vtime));
- vtime.itc = now_itc(vtm);
- vtime.itm = VCPU(v, itm);
- vtime.last_itc = vtm->last_itc;
- vtime.pending = vtm->pending;
-
- vtm_set_itm(v, vtime.itm);// this may start timer.
-
- if (hvm_save_entry(VTIME, v->vcpu_id, h, &vtime))
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vtime_load(struct domain *d, hvm_domain_context_t *h)
-{
- uint16_t vcpuid;
- struct vcpu *v;
- struct hvm_hw_ia64_vtime vtime;
- vtime_t *vtm;
-
- vcpuid = hvm_load_instance(h);
- if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
- gdprintk(XENLOG_ERR,
- "%s: domain has no vtime %u\n", __func__, vcpuid);
- return -EINVAL;
- }
-
- if (hvm_load_entry(VTIME, h, &vtime) != 0)
- return -EINVAL;
-
- vtm = &VMX(v, vtm);
- stop_timer(&vtm->vtm_timer); //XXX should wait for callback not running.
-
- vtm->last_itc = vtime.last_itc;
- vtm->pending = vtime.pending;
-
- migrate_timer(&vtm->vtm_timer, v->processor);
- vtm_set_itm(v, vtime.itm);
- vtm_set_itc(v, vtime.itc); // This may start timer.
-
- if (test_and_clear_bit(_VPF_down, &v->pause_flags))
- vcpu_wake(v);
-
- return 0;
-}
-
-HVM_REGISTER_SAVE_RESTORE(VTIME, vtime_save, vtime_load, 1, HVMSR_PER_VCPU);
diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c
deleted file mode 100644
index d92c7c73ff..0000000000
--- a/xen/arch/ia64/vmx/vmmu.c
+++ /dev/null
@@ -1,625 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmmu.c: virtual memory management unit components.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- */
-#include <asm/vmx_vcpu.h>
-#include <asm/vmx_pal_vsa.h>
-#include <xen/sched-if.h>
-#include <asm/vhpt.h>
-
-static int default_vtlb_sz = DEFAULT_VTLB_SZ;
-static int default_vhpt_sz = DEFAULT_VHPT_SZ;
-
-static void __init parse_vtlb_size(char *s)
-{
- int sz = parse_size_and_unit(s, NULL);
-
- if (sz > 0) {
- default_vtlb_sz = fls(sz - 1);
- /* minimum 16KB (for tag uniqueness) */
- if (default_vtlb_sz < 14)
- default_vtlb_sz = 14;
- }
-}
-
-static void __init parse_vhpt_size(char *s)
-{
- int sz = parse_size_and_unit(s, NULL);
- if (sz > 0) {
- default_vhpt_sz = fls(sz - 1);
- default_vhpt_sz = canonicalize_vhpt_size(default_vhpt_sz);
- }
-}
-
-custom_param("vti_vtlb_size", parse_vtlb_size);
-custom_param("vti_vhpt_size", parse_vhpt_size);
-
-
-static int init_domain_vhpt(struct vcpu *v)
-{
- int rc;
- u64 size = v->domain->arch.hvm_domain.params[HVM_PARAM_VHPT_SIZE];
-
- if (size == 0)
- size = default_vhpt_sz;
- else
- size = canonicalize_vhpt_size(size);
-
- rc = thash_alloc(&(v->arch.vhpt), size, "vhpt");
- v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
- return rc;
-}
-
-
-static void free_domain_vhpt(struct vcpu *v)
-{
- if (v->arch.vhpt.hash)
- thash_free(&(v->arch.vhpt));
-}
-
-int init_domain_tlb(struct vcpu *v)
-{
- int rc;
-
- rc = init_domain_vhpt(v);
- if (rc)
- return rc;
-
- rc = thash_alloc(&(v->arch.vtlb), default_vtlb_sz, "vtlb");
- if (rc) {
- free_domain_vhpt(v);
- return rc;
- }
-
- return 0;
-}
-
-
-void free_domain_tlb(struct vcpu *v)
-{
- if (v->arch.vtlb.hash)
- thash_free(&(v->arch.vtlb));
-
- free_domain_vhpt(v);
-}
-
-
-int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
-{
- ia64_rr vrr;
- PTA vpta;
- IA64_PSR vpsr;
-
- vpsr.val = VCPU(vcpu, vpsr);
- vcpu_get_rr(vcpu, vadr, &vrr.rrval);
- vpta.val = vmx_vcpu_get_pta(vcpu);
-
- if ( vrr.ve & vpta.ve ) {
- switch ( ref ) {
- case DATA_REF:
- case NA_REF:
- return vpsr.dt;
- case INST_REF:
- return vpsr.dt && vpsr.it && vpsr.ic;
- case RSE_REF:
- return vpsr.dt && vpsr.rt;
-
- }
- }
- return 0;
-}
-
-
-int unimplemented_gva(VCPU *vcpu,u64 vadr)
-{
-#if 0
- int bit=vcpu->domain->arch.imp_va_msb;
- u64 ladr =(vadr<<3)>>(3+bit);
- if(!ladr||ladr==(1U<<(61-bit))-1){
- return 0;
- }else{
- return 1;
- }
-#else
- return 0;
-#endif
-}
-
-
-/*
- * Fetch guest bundle code.
- * INPUT:
- * gip: guest ip
- * pbundle: used to return fetched bundle.
- */
-unsigned long
-fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle)
-{
- u64 gpip=0; // guest physical IP
- u64 *vpa;
- thash_data_t *tlb;
- u64 mfn, maddr;
- struct page_info* page;
-
- again:
- if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) { // I-side physical mode
- gpip = pa_clear_uc(gip); // clear UC bit
- }
- else {
- tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
-// if( tlb == NULL )
-// tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
- if (tlb)
- gpip = thash_translate(tlb, gip);
- }
- if( gpip){
- mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
- if (mfn == INVALID_MFN)
- panic_domain(vcpu_regs(vcpu), "fetch_code: invalid memory\n");
- maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
- }else{
- tlb = vhpt_lookup(gip);
- if (tlb == NULL) {
- ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
- return IA64_RETRY;
- }
- maddr = thash_translate(tlb, gip);
- mfn = maddr >> PAGE_SHIFT;
- }
-
- page = mfn_to_page(mfn);
- if (get_page(page, vcpu->domain) == 0) {
- if (page_get_owner(page) != vcpu->domain) {
- // This page might be a page granted by another domain.
- panic_domain(NULL, "domain tries to execute foreign domain "
- "page which might be mapped by grant table.\n");
- }
- goto again;
- }
- vpa = (u64 *)__va(maddr);
-
- pbundle->i64[0] = *vpa++;
- pbundle->i64[1] = *vpa;
- put_page(page);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
-{
-#ifdef VTLB_DEBUG
- int slot;
- u64 ps, va;
- ps = itir_ps(itir);
- va = PAGEALIGN(ifa, ps);
- slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
- if (slot >=0) {
- // generate MCA.
- panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
- return IA64_FAULT;
- }
-#endif //VTLB_DEBUG
- pte &= ~PAGE_FLAGS_RV_MASK;
- thash_purge_and_insert(vcpu, pte, itir, ifa, ISIDE_TLB);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
-{
-#ifdef VTLB_DEBUG
- int slot;
- u64 ps, va;
- ps = itir_ps(itir);
- va = PAGEALIGN(ifa, ps);
- slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
- if (slot >=0) {
- // generate MCA.
- panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
- return IA64_FAULT;
- }
-#endif //VTLB_DEBUG
- pte &= ~PAGE_FLAGS_RV_MASK;
- thash_purge_and_insert(vcpu, pte, itir, ifa, DSIDE_TLB);
- return IA64_NO_FAULT;
-
-}
-
-
-
-
-IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
-{
-#ifdef VTLB_DEBUG
- int index;
-#endif
- u64 ps, va, rid;
- thash_data_t * p_itr;
- ps = itir_ps(itir);
- va = PAGEALIGN(ifa, ps);
-#ifdef VTLB_DEBUG
- index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
- if (index >=0) {
- // generate MCA.
- panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
- return IA64_FAULT;
- }
- thash_purge_entries(vcpu, va, ps);
-#endif
-
- if (slot >= NITRS) {
- panic_domain(NULL, "bad itr.i slot (%ld)", slot);
- return IA64_FAULT;
- }
-
- pte &= ~PAGE_FLAGS_RV_MASK;
- vcpu_get_rr(vcpu, va, &rid);
- rid = rid& RR_RID_MASK;
- p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
- vmx_vcpu_set_tr(p_itr, pte, itir, va, rid);
- vcpu_quick_region_set(PSCBX(vcpu,itr_regions),va);
- return IA64_NO_FAULT;
-}
-
-
-IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
-{
-#ifdef VTLB_DEBUG
- int index;
-#endif
- u64 gpfn;
- u64 ps, va, rid;
- thash_data_t * p_dtr;
-
- ps = itir_ps(itir);
- va = PAGEALIGN(ifa, ps);
-#ifdef VTLB_DEBUG
- index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
- if (index>=0) {
- // generate MCA.
- panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
- return IA64_FAULT;
- }
-#endif
-
- if (slot >= NDTRS) {
- panic_domain(NULL, "bad itr.d slot (%ld)", slot);
- return IA64_FAULT;
- }
-
- pte &= ~PAGE_FLAGS_RV_MASK;
-
- /* This is a bad workaround.
-    In Linux, region 7 uses a 16M page size and is identity mapped.
-    The VHPT page size in Xen is 16K. If Xen purged the VHPT while the
-    guest inserted a 16M translation, it would iterate over the VHPT
-    1024 times, making Xen/IPF very slow. So Xen skips the purge for
-    16M pages. */
- if (ps != _PAGE_SIZE_16M)
- thash_purge_entries(vcpu, va, ps);
- gpfn = pte_pfn(__pte(pte));
- vcpu_get_rr(vcpu, va, &rid);
- rid &= RR_RID_MASK;
- p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
- vmx_vcpu_set_tr(p_dtr, pte, itir, va, rid);
- vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
- return IA64_NO_FAULT;
-}
-
-
-
-IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,u64 ifa, u64 ps)
-{
- int index;
- u64 va;
-
- va = PAGEALIGN(ifa, ps);
- while ((index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB)) >= 0) {
- vcpu->arch.dtrs[index].pte.p=0;
- }
- thash_purge_entries(vcpu, va, ps);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, u64 ifa, u64 ps)
-{
- int index;
- u64 va;
-
- va = PAGEALIGN(ifa, ps);
- while ((index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB)) >= 0) {
- vcpu->arch.itrs[index].pte.p=0;
- }
- thash_purge_entries(vcpu, va, ps);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, u64 va, u64 ps)
-{
- va = PAGEALIGN(va, ps);
- thash_purge_entries(vcpu, va, ps);
- return IA64_NO_FAULT;
-}
-
-
-IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, u64 va)
-{
- thash_purge_all(vcpu);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, u64 va, u64 ps)
-{
- return vmx_vcpu_ptc_ga(vcpu, va, ps);
-}
-/*
-IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
-{
- vmx_vcpu_ptc_l(vcpu, va, ps);
- return IA64_NO_FAULT;
-}
- */
-struct ptc_ga_args {
- unsigned long vadr;
- unsigned long rid;
- unsigned long ps;
- struct vcpu *vcpu;
-};
-
-static void ptc_ga_remote_func (void *varg)
-{
- u64 oldrid, moldrid, mpta, oldpsbits, vadr, flags;
- struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
- VCPU *v = args->vcpu;
- int cpu = v->processor;
-
- vadr = args->vadr;
-
- /* Try again if VCPU has migrated. */
- if (cpu != current->processor)
- return;
- local_irq_save(flags);
- if (!pcpu_schedule_trylock(cpu))
- goto bail2;
- if (v->processor != cpu)
- goto bail1;
- oldrid = VMX(v, vrr[0]);
- VMX(v, vrr[0]) = args->rid;
- oldpsbits = VMX(v, psbits[0]);
- VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vadr)]);
- moldrid = ia64_get_rr(0x0);
- ia64_set_rr(0x0,vrrtomrr(v,args->rid));
- mpta = ia64_get_pta();
- ia64_set_pta(v->arch.arch_vmx.mpta&(~1));
- ia64_srlz_d();
- vadr = PAGEALIGN(vadr, args->ps);
- thash_purge_entries_remote(v, vadr, args->ps);
- VMX(v, vrr[0]) = oldrid;
- VMX(v, psbits[0]) = oldpsbits;
- ia64_set_rr(0x0,moldrid);
- ia64_set_pta(mpta);
- ia64_dv_serialize_data();
- args->vcpu = NULL;
-bail1:
- pcpu_schedule_unlock(cpu);
-bail2:
- local_irq_restore(flags);
-}
-
-
-IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
-{
-
- struct domain *d = vcpu->domain;
- struct vcpu *v;
- struct ptc_ga_args args;
- int cpu;
-
- args.vadr = va;
- vcpu_get_rr(vcpu, va, &args.rid);
- args.ps = ps;
- for_each_vcpu (d, v) {
- if (!v->is_initialised)
- continue;
-
- if (v == vcpu) {
- vmx_vcpu_ptc_l(v, va, ps);
- continue;
- }
-
- args.vcpu = v;
- do {
- cpu = v->processor;
- if (cpu != current->processor) {
- spin_barrier(per_cpu(schedule_data, cpu).schedule_lock);
- /* Flush VHPT on remote processors. */
- smp_call_function_single(cpu, &ptc_ga_remote_func, &args, 1);
- } else {
- ptc_ga_remote_func(&args);
- }
- } while (args.vcpu != NULL);
- }
- return IA64_NO_FAULT;
-}
-
-
-u64 vmx_vcpu_thash(VCPU *vcpu, u64 vadr)
-{
- PTA vpta;
- ia64_rr vrr;
- u64 pval;
- u64 vhpt_offset;
- u64 mask;
-
- vpta.val = vmx_vcpu_get_pta(vcpu);
- vcpu_get_rr(vcpu, vadr, &vrr.rrval);
- mask = (1UL << vpta.size) - 1;
- if (vpta.vf) {
- vadr = (vadr & 0x1fffffffffffffffUL) >> vrr.ps;
- vhpt_offset = vadr ^ vrr.rid;
- pval = (vpta.val & ~0x7fffUL) + ((vhpt_offset << 5) & mask);
- } else {
- vhpt_offset=((vadr >> vrr.ps) << 3) & mask;
- pval = (vadr & VRN_MASK) |
- (vpta.val << 3 >> (vpta.size + 3) << vpta.size) |
- vhpt_offset;
- }
- return pval;
-}
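
In the short format (vpta.vf == 0), the hash above is just the VPN scaled to 8-byte entries and masked to the table size, placed at the PTA base; the real code additionally re-attaches the region bits from vadr and rebuilds the base from vpta.val, which the following simplified sketch omits:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical short-format VHPT hash (vf == 0 branch, simplified). */
    static uint64_t model_short_hash(uint64_t vadr, unsigned ps, unsigned size,
                                     uint64_t pta_base)
    {
        uint64_t mask = (1ULL << size) - 1;
        uint64_t off  = ((vadr >> ps) << 3) & mask;  /* 8-byte entries */

        return (pta_base & ~mask) | off;
    }

    int main(void)
    {
        /* 16KB pages (ps=14), 64KB table (size=16), base 0x1f000000. */
        printf("%#llx\n", (unsigned long long)
               model_short_hash(0x200c000, 14, 16, 0x1f000000));
        /* prints 0x1f004018 */
        return 0;
    }
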
-
-
-u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
-{
- ia64_rr vrr;
- PTA vpta;
- u64 pval;
- u64 rid;
- vpta.val = vmx_vcpu_get_pta(vcpu);
- vcpu_get_rr(vcpu, vadr, &vrr.rrval);
- if(vpta.vf){
- vadr = (vadr & 0x1fffffffffffffffUL) >> vrr.ps;
- rid = vrr.rid;
- pval = vadr ^ (rid << 39);
- }else{
- pval = 1;
- }
- return pval;
-}
-
-
-
-IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 vadr, u64 *padr)
-{
- thash_data_t *data;
- ISR visr,pt_isr;
- REGS *regs;
- u64 vhpt_adr, madr;
- IA64_PSR vpsr;
-
- regs = vcpu_regs(vcpu);
- pt_isr.val = VMX(vcpu, cr_isr);
- visr.val = 0;
- visr.ei = pt_isr.ei;
- visr.ir = pt_isr.ir;
- vpsr.val = VCPU(vcpu, vpsr);
- visr.na = 1;
-
- /* First look in VTLB. */
- data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
- if (data) {
- if (data->p == 0) {
- vcpu_set_isr(vcpu,visr.val);
- data_page_not_present(vcpu, vadr);
- return IA64_FAULT;
- } else if (data->ma == VA_MATTR_NATPAGE) {
- vcpu_set_isr(vcpu, visr.val);
- dnat_page_consumption(vcpu, vadr);
- return IA64_FAULT;
- } else {
- *padr = thash_translate(data, vadr);
- return IA64_NO_FAULT;
- }
- }
-
- /* Look in mVHPT. */
- data = vhpt_lookup(vadr);
- if (data) {
- if (data->p == 0) {
- vcpu_set_isr(vcpu,visr.val);
- data_page_not_present(vcpu, vadr);
- return IA64_FAULT;
- } else if (data->ma == VA_MATTR_NATPAGE) {
- vcpu_set_isr(vcpu, visr.val);
- dnat_page_consumption(vcpu, vadr);
- return IA64_FAULT;
- } else {
- madr = thash_translate(data, vadr);
- *padr = __mpa_to_gpa(madr);
- return IA64_NO_FAULT;
- }
- }
-
- /* If VHPT is not enabled, inject fault. */
- if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
- if (vpsr.ic) {
- vcpu_set_isr(vcpu, visr.val);
- alt_dtlb(vcpu, vadr);
- return IA64_FAULT;
- } else {
- nested_dtlb(vcpu);
- return IA64_FAULT;
- }
- }
-
- /* Get gVHPT entry. */
- vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
- data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
- if (data) {
- /* FIXME: we should read gadr from the entry! */
- if (vpsr.ic) {
- vcpu_set_isr(vcpu, visr.val);
- dtlb_fault(vcpu, vadr);
- return IA64_FAULT;
- } else {
- nested_dtlb(vcpu);
- return IA64_FAULT;
- }
- } else {
- if (vpsr.ic) {
- vcpu_set_isr(vcpu, visr.val);
- dvhpt_fault(vcpu, vadr);
- return IA64_FAULT;
- } else {
- nested_dtlb(vcpu);
- return IA64_FAULT;
- }
- }
-}
-
-u64 vmx_vcpu_tak(VCPU *vcpu, u64 vadr)
-{
- thash_data_t *data;
- u64 key;
-
- if (unimplemented_gva(vcpu, vadr)) {
- key = 1;
- return key;
- }
-
- data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
- if (data) {
- if (data->p)
- return data->key << 8;
- else
- return 1;
- }
-
- data = vhpt_lookup(vadr);
- if (data) {
- if (data->p)
- return data->key << 8; /* FIXME: possible mangling/masking. */
- else
- return 1;
- }
-
- if (!vhpt_enabled(vcpu, vadr, NA_REF))
- return 1;
-
- /* FIXME: look in the guest VHPT. */
- return 1;
-}
diff --git a/xen/arch/ia64/vmx/vmx_entry.S b/xen/arch/ia64/vmx/vmx_entry.S
deleted file mode 100644
index 3d11214440..0000000000
--- a/xen/arch/ia64/vmx/vmx_entry.S
+++ /dev/null
@@ -1,761 +0,0 @@
-/* -*- Mode:ASM; c-basic-offset:8; tab-width:8; indent-tabs-mode:t -*- */
-/*
- * vmx_entry.S:
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
- */
-
-#include <linux/config.h>
-#include <asm/asmmacro.h>
-#include <asm/offsets.h>
-#include "vmx_minstate.h"
-
-GLOBAL_ENTRY(ia64_leave_nested)
- rsm psr.i
- ;;
- adds r21=PT(PR)+16,r12
- ;;
- lfetch [r21],PT(CR_IPSR)-PT(PR)
- adds r2=PT(B6)+16,r12
- adds r3=PT(R16)+16,r12
- ;;
- lfetch [r21]
- ld8 r28=[r2],8 // load b6
- adds r29=PT(R24)+16,r12
-
- ld8.fill r16=[r3]
- adds r3=PT(AR_CSD)-PT(R16),r3
- adds r30=PT(AR_CCV)+16,r12
- ;;
- ld8.fill r24=[r29]
- ld8 r15=[r30] // load ar.ccv
- ;;
- ld8 r29=[r2],16 // load b7
- ld8 r30=[r3],16 // load ar.csd
- ;;
- ld8 r31=[r2],16 // load ar.ssd
- ld8.fill r8=[r3],16
- ;;
- ld8.fill r9=[r2],16
- ld8.fill r10=[r3],PT(R17)-PT(R10)
- ;;
- ld8.fill r11=[r2],PT(R18)-PT(R11)
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- mov ar.csd=r30
- mov ar.ssd=r31
- ;;
- rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
- invala // invalidate ALAT
- ;;
- ld8.fill r22=[r2],24
- ld8.fill r23=[r3],24
- mov b6=r28
- ;;
- ld8.fill r25=[r2],16
- ld8.fill r26=[r3],16
- mov b7=r29
- ;;
- ld8.fill r27=[r2],16
- ld8.fill r28=[r3],16
- ;;
- ld8.fill r29=[r2],16
- ld8.fill r30=[r3],24
- ;;
- ld8.fill r31=[r2],PT(F9)-PT(R31)
- adds r3=PT(F10)-PT(F6),r3
- ;;
- ldf.fill f9=[r2],PT(F6)-PT(F9)
- ldf.fill f10=[r3],PT(F8)-PT(F10)
- ;;
- ldf.fill f6=[r2],PT(F7)-PT(F6)
- ;;
- ldf.fill f7=[r2],PT(F11)-PT(F7)
- ldf.fill f8=[r3],32
- ;;
- srlz.i // ensure interruption collection is off
- mov ar.ccv=r15
- ;;
- bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
- ;;
- ldf.fill f11=[r2]
- adds r16=PT(CR_IPSR)+16,r12
- adds r17=PT(CR_IIP)+16,r12
- ;;
- ld8 r29=[r16],16 // load cr.ipsr
- ld8 r28=[r17],16 // load cr.iip
- ;;
- ld8 r30=[r16],16 // load cr.ifs
- ld8 r25=[r17],16 // load ar.unat
- ;;
-#ifndef XEN
- ld8 r26=[r16],16 // load ar.pfs
- ld8 r27=[r17],16 // load ar.rsc
- cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
- ;;
- ld8 r24=[r16],16 // load ar.rnat (may be garbage)
- ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
- ;;
- ld8 r31=[r16],16 // load predicates
-#else
- ld8 r26=[r16],32 // load ar.pfs
- ld8 r27=[r17],32 // load ar.rsc
- ;;
- ld8 r31=[r16],32 // load predicates
-#endif
- ld8 r22=[r17],16 // load b0
- ;;
-#ifndef XEN
- ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
-#endif
- ld8.fill r1=[r17],16 // load r1
- ;;
- ld8.fill r12=[r16],16
- ld8.fill r13=[r17],16
- ;;
- ld8 r20=[r16],16 // ar.fpsr
- ld8.fill r15=[r17],16
- ;;
- ld8.fill r14=[r16],16
- ld8.fill r2=[r17]
- ;;
- ld8.fill r3=[r16]
-#ifndef XEN
- ;;
- mov r16=ar.bsp // get existing backing store pointer
- ;;
-#endif
- mov b0=r22
- mov ar.pfs=r26
- mov cr.ifs=r30
- mov cr.ipsr=r29
- mov ar.fpsr=r20
- mov cr.iip=r28
- ;;
- mov ar.rsc=r27
- mov ar.unat=r25
- mov pr=r31,-1
- rfi
-END(ia64_leave_nested)
-
-
-
-GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
- PT_REGS_UNWIND_INFO(0)
- /*
- * work.need_resched etc. mustn't get changed by this CPU before it
- * returns to user- or fsys-mode, hence we disable interrupts
- * early on:
- */
- adds r2 = PT(R4)+16,r12
- adds r3 = PT(R5)+16,r12
- adds r8 = PT(EML_UNAT)+16,r12
- ;;
- ld8 r8 = [r8]
- ;;
- mov ar.unat=r8
- ;;
- ld8.fill r4=[r2],16 //load r4
- ld8.fill r5=[r3],16 //load r5
- ;;
- ld8.fill r6=[r2] //load r6
- ld8.fill r7=[r3] //load r7
- ;;
-END(ia64_leave_hypervisor_prepare)
-//fall through
-GLOBAL_ENTRY(ia64_leave_hypervisor)
- PT_REGS_UNWIND_INFO(0)
- rsm psr.i
- ;;
- br.call.sptk.many b0=leave_hypervisor_tail
- ;;
- adds r20=PT(PR)+16,r12
- adds r8=PT(EML_UNAT)+16,r12
- ;;
- ld8 r8=[r8]
- ;;
- mov ar.unat=r8
- ;;
- lfetch [r20],PT(CR_IPSR)-PT(PR)
- adds r2 = PT(B6)+16,r12
- adds r3 = PT(B7)+16,r12
- ;;
- lfetch [r20]
- ;;
- ld8 r24=[r2],16 /* B6 */
- ld8 r25=[r3],16 /* B7 */
- ;;
- ld8 r26=[r2],16 /* ar_csd */
- ld8 r27=[r3],16 /* ar_ssd */
- mov b6 = r24
- ;;
- ld8.fill r8=[r2],16
- ld8.fill r9=[r3],16
- mov b7 = r25
- ;;
- mov ar.csd = r26
- mov ar.ssd = r27
- ;;
- ld8.fill r10=[r2],PT(R15)-PT(R10)
- ld8.fill r11=[r3],PT(R14)-PT(R11)
- ;;
- ld8.fill r15=[r2],PT(R16)-PT(R15)
- ld8.fill r14=[r3],PT(R17)-PT(R14)
- ;;
- ld8.fill r16=[r2],16
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- ;;
- ld8.fill r22=[r2],16
- ld8.fill r23=[r3],16
- ;;
- ld8.fill r24=[r2],16
- ld8.fill r25=[r3],16
- ;;
- ld8.fill r26=[r2],16
- ld8.fill r27=[r3],16
- ;;
- ld8.fill r28=[r2],16
- ld8.fill r29=[r3],16
- ;;
- ld8.fill r30=[r2],PT(F6)-PT(R30)
- ld8.fill r31=[r3],PT(F7)-PT(R31)
- ;;
- rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
- invala // invalidate ALAT
- ;;
- ldf.fill f6=[r2],32
- ldf.fill f7=[r3],32
- ;;
- ldf.fill f8=[r2],32
- ldf.fill f9=[r3],32
- ;;
- ldf.fill f10=[r2],32
- ldf.fill f11=[r3],24
- ;;
- srlz.i // ensure interruption collection is off
- ;;
- bsw.0
- ;;
- adds r16 = PT(CR_IPSR)+16,r12
- adds r17 = PT(CR_IIP)+16,r12
- mov r21=r13 // get current
- ;;
- ld8 r31=[r16],16 // load cr.ipsr
- ld8 r30=[r17],16 // load cr.iip
- ;;
- ld8 r29=[r16],16 // load cr.ifs
- ld8 r28=[r17],16 // load ar.unat
- ;;
- ld8 r27=[r16],16 // load ar.pfs
- ld8 r26=[r17],16 // load ar.rsc
- ;;
- ld8 r25=[r16],16 // load ar.rnat
- ld8 r24=[r17],16 // load ar.bspstore
- ;;
- ld8 r23=[r16],16 // load predicates
- ld8 r22=[r17],16 // load b0
- ;;
- ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
- ld8.fill r1=[r17],16 //load r1
- ;;
- ld8.fill r12=[r16],16 //load r12
- ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
- ;;
- ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
- ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
- ;;
- ld8.fill r3=[r16] //load r3
- ld8 r18=[r17] //load ar_ccv
- ;;
- mov ar.fpsr=r19
- mov ar.ccv=r18
- shr.u r18=r20,16
- ;;
-vmx_rbs_switch:
- movl r19= THIS_CPU(ia64_phys_stacked_size_p8)
- ;;
- ld4 r19=[r19]
-
-vmx_dont_preserve_current_frame:
-/*
- * To prevent leaking bits between the hypervisor and guest domain, we must
- * clear the stacked registers in the "invalid" partition here. Not pretty,
- * but at least it's fast (up to 5 registers/cycle on McKinley).
- */
-# define pRecurse p6
-# define pReturn p7
-# define Nregs 14
-
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
- sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize
- ;;
- mov ar.rsc=r20 // load ar.rsc to be used for "loadrs"
- shladd in0=loc1,3,r19
- mov in1=0
- ;;
- TEXT_ALIGN(32)
-vmx_rse_clear_invalid:
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
- add out0=-Nregs*8,in0
- add out1=1,in1 // increment recursion count
- mov loc1=0
- mov loc2=0
- ;;
- mov loc3=0
- mov loc4=0
- mov loc5=0
- mov loc6=0
- mov loc7=0
-(pRecurse) br.call.dptk.few b0=vmx_rse_clear_invalid
- ;;
- mov loc8=0
- mov loc9=0
- cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
- mov loc10=0
- mov loc11=0
-(pReturn) br.ret.dptk.many b0
-
-# undef pRecurse
-# undef pReturn
-
-// loadrs has already been shifted
- alloc r16=ar.pfs,0,0,0,0 // drop current register frame
- ;;
- loadrs
- ;;
- mov ar.bspstore=r24
- ;;
- mov ar.unat=r28
- mov ar.rnat=r25
- mov ar.rsc=r26
- ;;
- mov cr.ipsr=r31
- mov cr.iip=r30
-(pNonSys) mov cr.ifs=r29
- mov ar.pfs=r27
- adds r18=IA64_VPD_BASE_OFFSET,r21
- ;;
- ld8 r18=[r18] //vpd
- adds r17=IA64_VCPU_ISR_OFFSET,r21
- ;;
- ld8 r17=[r17]
- adds r19=VPD(VPSR),r18
- ;;
- ld8 r19=[r19] //vpsr
- ;;
-//vsa_sync_write_start
- movl r24=ia64_vmm_entry // calculate return address
- mov r25=r18
- br.sptk.many vmx_vps_sync_write // call the service
- ;;
-END(ia64_leave_hypervisor)
-// fall through
-
-
-GLOBAL_ENTRY(ia64_vmm_entry)
-/*
- * must be at bank 0
- * parameter:
- * r17:cr.isr
- * r18:vpd
- * r19:vpsr
- * r22:b0
- * r23:predicate
- */
- mov r24=r22
- mov r25=r18
- tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
- (p1) br.cond.sptk.few vmx_vps_resume_normal
- (p2) br.cond.sptk.many vmx_vps_resume_handler
- ;;
-END(ia64_vmm_entry)
-
-
-/*
- * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
- * need to switch to bank 0 and doesn't restore the scratch registers.
- * To avoid leaking kernel bits, the scratch registers are set to
- * the following known-to-be-safe values:
- *
- * r1: restored (global pointer)
- * r2: cleared
- * r3: 1 (when returning to user-level)
- * r8-r11: restored (syscall return value(s))
- * r12: restored (user-level stack pointer)
- * r13: restored (user-level thread pointer)
- * r14: set to __kernel_syscall_via_epc
- * r15: restored (syscall #)
- * r16-r17: cleared
- * r18: user-level b6
- * r19: cleared
- * r20: user-level ar.fpsr
- * r21: user-level b0
- * r22: cleared
- * r23: user-level ar.bspstore
- * r24: user-level ar.rnat
- * r25: user-level ar.unat
- * r26: user-level ar.pfs
- * r27: user-level ar.rsc
- * r28: user-level ip
- * r29: user-level psr
- * r30: user-level cfm
- * r31: user-level pr
- * f6-f11: cleared
- * pr: restored (user-level pr)
- * b0: restored (user-level rp)
- * b6: restored
- * b7: set to __kernel_syscall_via_epc
- * ar.unat: restored (user-level ar.unat)
- * ar.pfs: restored (user-level ar.pfs)
- * ar.rsc: restored (user-level ar.rsc)
- * ar.rnat: restored (user-level ar.rnat)
- * ar.bspstore: restored (user-level ar.bspstore)
- * ar.fpsr: restored (user-level ar.fpsr)
- * ar.ccv: cleared
- * ar.csd: cleared
- * ar.ssd: cleared
- */
-GLOBAL_ENTRY(ia64_leave_hypercall)
- PT_REGS_UNWIND_INFO(0)
- /*
- * work.need_resched etc. mustn't get changed by this CPU before it returns to
- * user- or fsys-mode, hence we disable interrupts early on.
- *
- * p6 controls whether current_thread_info()->flags needs to be checked for
- * extra work. We always check for extra work when returning to user-level.
- * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
- * is 0. After extra work processing has been completed, execution
- * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
- * needs to be redone.
- */
- ;;
- adds r16=PT(R8)+16,r12
- ;;
- st8 [r16]=r8
- ;;
-//(pUStk) rsm psr.i
- rsm psr.i
- cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
-//(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
- ;;
- br.call.sptk.many b0=leave_hypervisor_tail
-.work_processed_syscall:
- //clean up bank 1 registers
- ;;
- adds r16=PT(R8)+16,r12
- ;;
- ld8 r8=[r16]
- ;;
- mov r16=r0
- mov r17=r0
- mov r18=r0
- mov r19=r0
- mov r20=r0
- mov r21=r0
- mov r22=r0
- mov r23=r0
- mov r24=r0
- mov r25=r0
- mov r26=r0
- mov r27=r0
- mov r28=r0
- mov r29=r0
- mov r30=r0
- mov r31=r0
- bsw.0
- ;;
- adds r2=PT(LOADRS)+16,r12
- adds r3=PT(AR_BSPSTORE)+16,r12
-#ifndef XEN
- adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
-(p6) ld4 r31=[r18] // load current_thread_info()->flags
-#endif
- ;;
- ld8 r20=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
- nop.i 0
- ;;
-// mov r16=ar.bsp // M2 get existing backing store pointer
- ld8 r18=[r2],PT(R9)-PT(B6) // load b6
-#ifndef XEN
-(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
-#endif
- ;;
- ld8 r24=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
-#ifndef XEN
-(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
-(p6) br.cond.spnt .work_pending_syscall
-#endif
- ;;
- // start restoring the state saved on the kernel stack (struct pt_regs):
- ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
- ld8 r11=[r3],PT(CR_IIP)-PT(R11)
-//(pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE!
- ;;
- invala // M0|1 invalidate ALAT
- rsm psr.i | psr.ic // M2 turn off interrupts and interruption collection
-#ifndef XEN
- cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs
-#endif
-
- ld8 r31=[r2],32 // M0|1 load cr.ipsr
- ld8 r30=[r3],16 // M0|1 load cr.iip
- ;;
-// ld8 r29=[r2],16 // M0|1 load cr.ifs
- ld8 r28=[r3],16 // M0|1 load ar.unat
-//(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
- ;;
- ld8 r27=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
-//(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
- nop 0
- ;;
- ld8 r22=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
- ld8 r26=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
- mov f6=f0 // F clear f6
- ;;
- ld8 r25=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage)
- ld8 r23=[r3],PT(R1)-PT(PR) // M0|1 load predicates
- mov f7=f0 // F clear f7
- ;;
- ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
- ld8.fill r1=[r3],16 // M0|1 load r1
-//(pUStk) mov r17=1 // A
- ;;
-//(pUStk) st1 [r14]=r17 // M2|3
- ld8.fill r13=[r3],16 // M0|1
- mov f8=f0 // F clear f8
- ;;
- ld8.fill r12=[r2] // M0|1 restore r12 (sp)
-#ifdef XEN
- ld8.fill r2=[r3] // M0|1
-#else
- ld8.fill r15=[r3] // M0|1 restore r15
-#endif
- mov b6=r18 // I0 restore b6
- mov ar.fpsr=r20
-// addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
- mov f9=f0 // F clear f9
-//(pKStk) br.cond.dpnt.many skip_rbs_switch // B
-
-// srlz.d // M0 ensure interruption collection is off (for cover)
-// shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
- mov r3=r21
- cover // B add current frame into dirty partition & set cr.ifs
- ;;
-//(pUStk) ld4 r17=[r17] // M0|1 r17 = cpu_data->phys_stacked_size_p8
- mov r19=ar.bsp // M2 get new backing store pointer
- addl r18=IA64_RBS_OFFSET, r3
- ;;
- mov r3=r0
- sub r18=r19,r18 // get byte size of existing "dirty" partition
- ;;
- shl r20=r18,16 // set rsc.load
- mov f10=f0 // F clear f10
-#ifdef XEN
- mov r14=r0
-#else
- movl r14=__kernel_syscall_via_epc // X
-#endif
- ;;
- mov.m ar.csd=r0 // M2 clear ar.csd
- mov.m ar.ccv=r0 // M2 clear ar.ccv
- mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc)
-
- mov.m ar.ssd=r0 // M2 clear ar.ssd
- mov f11=f0 // F clear f11
- br.cond.sptk.many vmx_rbs_switch // B
-END(ia64_leave_hypercall)
-
-
-/*
- * in0: new rr7
- * in1: virtual address of guest_vhpt
- * in2: virtual address of guest shared_info
- * r8: will contain old rid value
- */
-
-#define PSR_BITS_TO_CLEAR \
- (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | \
- IA64_PSR_RT | IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | \
- IA64_PSR_ED | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_IC)
-#define PSR_BITS_TO_SET IA64_PSR_BN
-
-GLOBAL_ENTRY(__vmx_switch_rr7)
- // not sure this unwind statement is correct...
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 4, 7, 0, 0
-1:{
- mov r28 = in0 // copy procedure index
- mov r8 = ip // save ip to compute branch
- mov loc0 = rp // save rp
-};;
- .body
- movl loc2=PERCPU_ADDR
- ;;
- tpa loc2 = loc2 // get physical address of per cpu data
- tpa r3 = r8 // get physical address of ip
- dep loc5 = 0,in1,60,4 // get physical address of guest_vhpt
- dep loc6 = 0,in2,60,4 // get physical address of privregs
- ;;
- dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
- // mask granule shift
- mov loc4 = psr // save psr
- ;;
- mov loc3 = ar.rsc // save RSE configuration
- ;;
- mov ar.rsc = 0 // put RSE in enforced lazy, LE mode
- movl r16=PSR_BITS_TO_CLEAR
- movl r17=PSR_BITS_TO_SET
- ;;
- or loc4 = loc4,r17 // add in psr the bits to set
- ;;
- andcm r16=loc4,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode_phys
-1:
- // now in physical mode with psr.i/ic off so do rr7 switch
- dep r16=-1,r0,61,3
- ;;
- mov rr[r16]=in0
- ;;
- srlz.d
- ;;
-
- // re-pin mappings for kernel text and data
- mov r18=KERNEL_TR_PAGE_SHIFT<<2
- movl r17=KERNEL_START
- ;;
- ptr.i r17,r18
- ptr.d r17,r18
- ;;
- mov cr.itir=r18
- mov cr.ifa=r17
- mov r16=IA64_TR_KERNEL
- movl r25 = PAGE_KERNEL
- // r2 = physical address of ip, truncated at KERNEL_TR_PAGE_SHIFT
- // = ia64_tpa(ip) & ~(KERNEL_TR_PAGE_SIZE - 1)
- dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
- ;;
- or r24=r2,r25
- ;;
- srlz.i
- ;;
- itr.i itr[r16]=r24
- ;;
- itr.d dtr[r16]=r24
- ;;
-
- // re-pin mapping for stack (current)
- mov r26=IA64_GRANULE_SHIFT<<2
- dep r21=0,r13,60,4 // physical address of "current"
- ;;
- ptr.d r13,r26
- or r23=r21,r25 // construct PA | page properties
- mov cr.itir=r26
- mov cr.ifa=r13 // VA of next task...
- mov r18=IA64_TR_CURRENT_STACK
- ;;
- itr.d dtr[r18]=r23 // wire in new mapping...
-
- // re-pin mappings for per-cpu data
- movl r22 = PERCPU_ADDR
- ;;
- mov r24=IA64_TR_PERCPU_DATA
- or loc2 = r25,loc2 // construct PA | page properties
- mov r23=PERCPU_PAGE_SHIFT<<2
- ;;
- ptr.d r22,r23
- ;;
- mov cr.itir=r23
- mov cr.ifa=r22
- ;;
- itr.d dtr[r24]=loc2 // wire in new mapping...
- ;;
-
- // re-pin mappings for guest_vhpt
- // unless overlaps with IA64_TR_CURRENT_STACK
- // r21 = (current physical addr) & (IA64_GRANULE_SIZE - 1)
- dep r21=0,r21,0,IA64_GRANULE_SHIFT
- // r17 = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
- dep r17=0,loc5,0,IA64_GRANULE_SHIFT
- ;;
- cmp.eq p7,p0=r17,r21 // check overlap with current stack
-(p7) br.cond.sptk .vhpt_overlaps
- mov r24=IA64_TR_VHPT
- ;;
- or loc5 = r25,loc5 // construct PA | page properties
- mov r23 = IA64_GRANULE_SHIFT <<2
- ;;
- ptr.d in1,r23
- ;;
- mov cr.itir=r23
- mov cr.ifa=in1
- ;;
- itr.d dtr[r24]=loc5 // wire in new mapping...
- ;;
-.vhpt_overlaps:
-
- // r16, r19, r20 are used by
- // ia64_switch_mode_phys()/ia64_switch_mode_virt()
- // re-pin mappings for privregs
- // r21 = (current physical addr) & (IA64_GRANULE_SIZE - 1)
- // r17 = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
- // loc6 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
- cmp.ne.unc p7,p0=r21,loc6 // check overlap with current stack
- ;;
-(p7) cmp.ne.unc p8,p0=r17,loc6 // check overlap with guest_vhpt
- ;;
- // loc6 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
- or loc6 = r25,loc6 // construct PA | page properties
- ;;
- mov r22=IA64_TR_VPD
- mov r24=IA64_TR_MAPPED_REGS
- mov r23=IA64_GRANULE_SHIFT<<2
- ;;
- ptr.i in2,r23
-(p8) ptr.d in2,r23
- mov cr.itir=r23
- mov cr.ifa=in2
- ;;
- itr.i itr[r22]=loc6 // wire in new mapping...
- ;;
-(p8) itr.d dtr[r24]=loc6 // wire in new mapping...
- ;;
-
- // done, switch back to virtual and return
- mov r16=loc4 // r16= original psr
- br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- mov ar.rsc=loc3 // restore RSE configuration
- srlz.d // serialize restoration of psr.l
- br.ret.sptk.many rp
-END(__vmx_switch_rr7)
diff --git a/xen/arch/ia64/vmx/vmx_fault.c b/xen/arch/ia64/vmx/vmx_fault.c
deleted file mode 100644
index 642d3120c2..0000000000
--- a/xen/arch/ia64/vmx/vmx_fault.c
+++ /dev/null
@@ -1,604 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_fault.c: handling VMX architecture-related VM exits
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- */
-
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <xen/errno.h>
-#include <xen/sched.h>
-#include <xen/smp.h>
-#include <asm/ptrace.h>
-#include <xen/delay.h>
-
-#include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
-#include <asm/sal.h> /* FOR struct ia64_sal_retval */
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-#include <asm/vlsapic.h>
-#include <xen/irq.h>
-#include <xen/event.h>
-#include <asm/regionreg.h>
-#include <asm/privop.h>
-#include <asm/ia64_int.h>
-#include <asm/debugger.h>
-#include <asm/dom_fw.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/kregs.h>
-#include <asm/vmx.h>
-#include <asm/vmmu.h>
-#include <asm/vmx_mm_def.h>
-#include <asm/vmx_phy_mode.h>
-#include <xen/mm.h>
-#include <asm/vmx_pal.h>
-#include <asm/shadow.h>
-#include <asm/sioemu.h>
-#include <public/arch-ia64/sioemu.h>
-#include <xen/hvm/irq.h>
-
-/* reset all PSR fields to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
-#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
-
-extern unsigned long handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr);
-
-#define DOMN_PAL_REQUEST 0x110000
-#define DOMN_SAL_REQUEST 0x110001
-
-static const u16 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,
- 0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
- 0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
- 0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
- 0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
- 0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
- 0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
- 0x7f00
-};
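The vec2off[] table above mirrors the IVT layout documented in vmx_ivt.S further down: vectors 0-19 occupy 64 bundles (0x400 bytes) each, the remaining vectors 16 bundles (0x100 bytes) each. A minimal sketch, not part of the original source, that recomputes the table entries:

    #include <assert.h>

    /* IVT offset for a vector number: the first 20 entries are 64
     * bundles (64 * 16 bytes = 0x400) apart, later ones 0x100 apart. */
    static unsigned int vec_to_offset(unsigned int vec)
    {
        return vec < 20 ? vec * 0x400 : 20 * 0x400 + (vec - 20) * 0x100;
    }

    int main(void)
    {
        assert(vec_to_offset(7)  == 0x1c00);  /* Data Key Miss */
        assert(vec_to_offset(20) == 0x5000);  /* first 16-bundle entry */
        assert(vec_to_offset(67) == 0x7f00);  /* last vec2off entry */
        return 0;
    }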
-
-void vmx_lazy_load_fpu(struct vcpu *vcpu)
-{
- if (FP_PSR(vcpu) & IA64_PSR_DFH) {
- FP_PSR(vcpu) = IA64_PSR_MFH;
- if (__ia64_per_cpu_var(fp_owner) != vcpu)
- __ia64_load_fpu(vcpu->arch._thread.fph);
- }
-}
-
-void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
- u64 vec, REGS *regs)
-{
- u64 status, vector;
- VCPU *vcpu = current;
- u64 vpsr = VCPU(vcpu, vpsr);
-
- vector = vec2off[vec];
-
- switch (vec) {
- case 5: // IA64_DATA_NESTED_TLB_VECTOR
- break;
- case 22: // IA64_INST_ACCESS_RIGHTS_VECTOR
- if (!(vpsr & IA64_PSR_IC))
- goto nested_fault;
- if (vhpt_access_rights_fixup(vcpu, ifa, 0))
- return;
- break;
-
- case 25: // IA64_DISABLED_FPREG_VECTOR
- if (!(vpsr & IA64_PSR_IC))
- goto nested_fault;
- vmx_lazy_load_fpu(vcpu);
- if (!(VCPU(vcpu, vpsr) & IA64_PSR_DFH)) {
- regs->cr_ipsr &= ~IA64_PSR_DFH;
- return;
- }
-
- break;
-
- case 32: // IA64_FP_FAULT_VECTOR
- if (!(vpsr & IA64_PSR_IC))
- goto nested_fault;
- // handle fpswa emulation
- // fp fault
- status = handle_fpu_swa(1, regs, isr);
- if (!status) {
- vcpu_increment_iip(vcpu);
- return;
- }
- break;
-
- case 33: // IA64_FP_TRAP_VECTOR
- if (!(vpsr & IA64_PSR_IC))
- goto nested_fault;
- //fp trap
- status = handle_fpu_swa(0, regs, isr);
- if (!status)
- return;
- break;
-
- case 29: // IA64_DEBUG_VECTOR
- case 35: // IA64_TAKEN_BRANCH_TRAP_VECTOR
- case 36: // IA64_SINGLE_STEP_TRAP_VECTOR
- if (vmx_guest_kernel_mode(regs)
- && current->domain->debugger_attached) {
- domain_pause_for_debugger();
- return;
- }
- if (!(vpsr & IA64_PSR_IC))
- goto nested_fault;
- break;
-
- default:
- if (!(vpsr & IA64_PSR_IC))
- goto nested_fault;
- break;
- }
- VCPU(vcpu,isr) = isr;
- VCPU(vcpu,iipa) = regs->cr_iip;
- if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
- VCPU(vcpu,iim) = iim;
- else
- set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
- inject_guest_interruption(vcpu, vector);
- return;
-
- nested_fault:
- panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
-}
-
-
-IA64FAULT
-vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
-{
- struct domain *d = current->domain;
- struct vcpu *v = current;
-
- perfc_incr(vmx_ia64_handle_break);
-#ifdef CRASH_DEBUG
- if ((iim == 0 || iim == CDB_BREAK_NUM) && !vmx_user_mode(regs) &&
- IS_VMM_ADDRESS(regs->cr_iip)) {
- if (iim == 0)
- show_registers(regs);
- debugger_trap_fatal(0 /* don't care */, regs);
- regs_increment_iip(regs);
- return IA64_NO_FAULT;
- }
-#endif
- if (!vmx_user_mode(regs)) {
- show_registers(regs);
- gdprintk(XENLOG_DEBUG, "%s:%d imm %lx\n", __func__, __LINE__, iim);
- ia64_fault(11 /* break fault */, isr, ifa, iim,
- 0 /* cr.itir */, 0, 0, 0, (unsigned long)regs);
- }
-
- if (ia64_psr(regs)->cpl == 0) {
- /* Allow hypercalls only when cpl = 0. */
-
- /* Only common hypercalls are handled by vmx_break_fault. */
- if (iim == d->arch.breakimm) {
- ia64_hypercall(regs);
- vcpu_increment_iip(v);
- return IA64_NO_FAULT;
- }
-
-        /* Normal hypercalls were handled above, so iim cannot equal breakimm here. */
- BUG_ON(iim == d->arch.breakimm);
-
- if (iim == DOMN_PAL_REQUEST) {
- pal_emul(v);
- vcpu_increment_iip(v);
- return IA64_NO_FAULT;
- } else if (iim == DOMN_SAL_REQUEST) {
- if (d->arch.is_sioemu)
- sioemu_sal_assist(v);
- else {
- sal_emul(v);
- vcpu_increment_iip(v);
- }
- return IA64_NO_FAULT;
- }
- }
- vmx_reflect_interruption(ifa, isr, iim, 11, regs);
- return IA64_NO_FAULT;
-}
-
-
-void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
-{
- unsigned long i=0UL, * src,* dst, *sunat, *dunat;
- IA64_PSR vpsr;
-
- src = &regs->r16;
- sunat = &regs->eml_unat;
- vpsr.val = VCPU(v, vpsr);
- if (vpsr.bn) {
- dst = &VCPU(v, vgr[0]);
- dunat =&VCPU(v, vnat);
- __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \
- dep %2 = %0, %2, 0, 16;; \
- st8 [%3] = %2;;"
- ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
-
- } else {
- dst = &VCPU(v, vbgr[0]);
-// dunat =&VCPU(v, vbnat);
-// __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
-// dep %2 = %0, %2, 16, 16;;
-// st8 [%3] = %2;;"
-// ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
-
- }
- for (i = 0; i < 16; i++)
- *dst++ = *src++;
-}
-
-
-// ONLY gets called from ia64_leave_kernel
-// ONLY call with interrupts disabled?? (else might miss one?)
-// NEVER successful if already reflecting a trap/fault because psr.i==0
-void leave_hypervisor_tail(void)
-{
- struct domain *d = current->domain;
- struct vcpu *v = current;
-
- /* FIXME: can this happen ? */
- if (is_idle_domain(current->domain))
- return;
-
-    // A softirq may generate an interrupt, so run softirqs early.
- local_irq_enable();
- do_softirq();
- local_irq_disable();
-
- // FIXME: Will this work properly if doing an RFI???
- if (d->arch.is_sioemu) {
- if (local_events_need_delivery()) {
- sioemu_deliver_event();
- }
- } else if (v->vcpu_id == 0) {
- unsigned long callback_irq =
- d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];
-
- if (v->arch.arch_vmx.pal_init_pending) {
- /* inject INIT interruption to guest pal */
- v->arch.arch_vmx.pal_init_pending = 0;
- deliver_pal_init(v);
- return;
- }
-
- /*
- * val[63:56] == 1: val[55:0] is a delivery PCI INTx line:
- * Domain = val[47:32], Bus = val[31:16],
- * DevFn = val[15: 8], IntX = val[ 1: 0]
- * val[63:56] == 0: val[55:0] is a delivery as GSI
- */
- if (callback_irq != 0 && local_events_need_delivery()) {
- /* change level for para-device callback irq */
- /* use level irq to send discrete event */
- if ((uint8_t)(callback_irq >> 56) == 1) {
- /* case of using PCI INTx line as callback irq */
- int pdev = (callback_irq >> 11) & 0x1f;
- int pintx = callback_irq & 3;
- viosapic_set_pci_irq(d, pdev, pintx, 1);
- viosapic_set_pci_irq(d, pdev, pintx, 0);
- } else {
- /* case of using GSI as callback irq */
- viosapic_set_irq(d, callback_irq, 1);
- viosapic_set_irq(d, callback_irq, 0);
- }
- }
- }
-
- rmb();
- if (xchg(&v->arch.irq_new_pending, 0)) {
- v->arch.irq_new_condition = 0;
- vmx_check_pending_irq(v);
- } else if (v->arch.irq_new_condition) {
- v->arch.irq_new_condition = 0;
- vhpi_detection(v);
- }
-}
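As a reading aid for the callback_irq encoding documented inside leave_hypervisor_tail() above, here is a hedged C sketch (struct and function names are hypothetical) of the two delivery forms; the PCI INTx arm mirrors the `(callback_irq >> 11) & 0x1f` and `& 3` extractions in the function:

    #include <stdint.h>

    struct cb_irq {                 /* hypothetical holder */
        int          is_pci_intx;   /* val[63:56] == 1 */
        unsigned int device;        /* bits 15:11, the device part of DevFn */
        unsigned int intx;          /* val[1:0] */
        uint64_t     gsi;           /* val[55:0] when val[63:56] == 0 */
    };

    static struct cb_irq decode_callback_irq(uint64_t val)
    {
        struct cb_irq c = { 0, 0, 0, 0 };

        c.is_pci_intx = ((uint8_t)(val >> 56) == 1);
        if (c.is_pci_intx) {
            c.device = (val >> 11) & 0x1f;  /* same as the pdev extraction above */
            c.intx   = val & 3;             /* same as the pintx extraction above */
        } else {
            c.gsi = val & ((1ULL << 56) - 1);
        }
        return c;
    }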
-
-static int vmx_handle_lds(REGS* regs)
-{
- regs->cr_ipsr |= IA64_PSR_ED;
- return IA64_FAULT;
-}
-
-static inline int unimpl_phys_addr (u64 paddr)
-{
- return (pa_clear_uc(paddr) >> MAX_PHYS_ADDR_BITS) != 0;
-}
-
-/* We came here because the H/W VHPT walker failed to find an entry */
-IA64FAULT
-vmx_hpw_miss(u64 vadr, u64 vec, REGS* regs)
-{
- IA64_PSR vpsr;
- int type;
- u64 vhpt_adr, gppa, pteval, rr, itir;
- ISR misr;
- PTA vpta;
- thash_data_t *data;
- VCPU *v = current;
-
- vpsr.val = VCPU(v, vpsr);
- misr.val = VMX(v,cr_isr);
-
- if (vec == 1 || vec == 3)
- type = ISIDE_TLB;
- else if (vec == 2 || vec == 4)
- type = DSIDE_TLB;
- else
- panic_domain(regs, "wrong vec:%lx\n", vec);
-
- /* Physical mode. */
- if (type == ISIDE_TLB) {
- if (!vpsr.it) {
- if (unlikely(unimpl_phys_addr(vadr))) {
- unimpl_iaddr_trap(v, vadr);
- return IA64_FAULT;
- }
- physical_tlb_miss(v, vadr, type);
- return IA64_FAULT;
- }
- } else { /* DTLB miss. */
- if (!misr.rs) {
- if (!vpsr.dt) {
- u64 pte;
- if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
- return vmx_handle_lds(regs);
- if (unlikely(unimpl_phys_addr(vadr))) {
- unimpl_daddr(v);
- return IA64_FAULT;
- }
- pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL);
- if (v->domain != dom0 && (pte & _PAGE_IO)) {
- emulate_io_inst(v, pa_clear_uc(vadr), 4,
- pte_pfn(__pte(pte)));
- return IA64_FAULT;
- }
- physical_tlb_miss(v, vadr, type);
- return IA64_FAULT;
- }
- } else { /* RSE fault. */
- if (!vpsr.rt) {
- if (unlikely(unimpl_phys_addr(vadr))) {
- unimpl_daddr(v);
- return IA64_FAULT;
- }
- physical_tlb_miss(v, vadr, type);
- return IA64_FAULT;
- }
- }
- }
-
-try_again:
- /* Search in VTLB. */
- data = vtlb_lookup(v, vadr, type);
- if (data != 0) {
- /* Found. */
- if (v->domain != dom0 && type == DSIDE_TLB) {
- u64 pte;
- if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
- if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
- return vmx_handle_lds(regs);
- }
- gppa = thash_translate(data, vadr);
- pte = lookup_domain_mpa(v->domain, gppa, NULL);
- if (pte & _PAGE_IO) {
- if (misr.sp)
- panic_domain(NULL, "ld.s on I/O page not with UC attr."
- " pte=0x%lx\n", data->page_flags);
- if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
- emulate_io_inst(v, gppa, data->ma,
- pte_pfn(__pte(pte)));
- else {
- vcpu_set_isr(v, misr.val);
- data_access_rights(v, vadr);
- }
- return IA64_FAULT;
- }
- }
- thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
- return IA64_NO_FAULT;
- }
-
- if (type == DSIDE_TLB) {
- struct opt_feature* optf = &(v->domain->arch.opt_feature);
-
- if (misr.sp)
- return vmx_handle_lds(regs);
-
- vcpu_get_rr(v, vadr, &rr);
- itir = rr & (RR_RID_MASK | RR_PS_MASK);
-
- if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
-            /* Windows uses regions 4 and 5 for identity mapping */
- if ((optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4_FLG) &&
- REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL) &&
- REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) {
-
- pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
- optf->im_reg4.pgprot;
- if (thash_purge_and_insert(v, pteval, itir, vadr, type))
- goto try_again;
- return IA64_NO_FAULT;
- }
- if ((optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5_FLG) &&
- REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL) &&
- REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) {
-
- pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
- optf->im_reg5.pgprot;
- if (thash_purge_and_insert(v, pteval, itir, vadr, type))
- goto try_again;
- return IA64_NO_FAULT;
- }
- if (vpsr.ic) {
- vcpu_set_isr(v, misr.val);
- alt_dtlb(v, vadr);
- } else {
- nested_dtlb(v);
- }
- return IA64_FAULT;
- }
-
- vpta.val = vmx_vcpu_get_pta(v);
- if (vpta.vf) {
- /* Long format is not yet supported. */
- goto inject_dtlb_fault;
- }
-
- /* avoid recursively walking (short format) VHPT */
- if (!(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4_FLG) &&
- !(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5_FLG) &&
- (((vadr ^ vpta.val) << 3) >> (vpta.size + 3)) == 0) {
- goto inject_dtlb_fault;
- }
-
- vhpt_adr = vmx_vcpu_thash(v, vadr);
- if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
- /* VHPT successfully read. */
- if (!(pteval & _PAGE_P)) {
- goto inject_dtlb_fault;
- } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
- thash_purge_and_insert(v, pteval, itir, vadr, DSIDE_TLB);
- return IA64_NO_FAULT;
- }
- goto inject_dtlb_fault;
- } else {
- /* Can't read VHPT. */
- if (vpsr.ic) {
- vcpu_set_isr(v, misr.val);
- dvhpt_fault(v, vadr);
- return IA64_FAULT;
- } else {
- nested_dtlb(v);
- return IA64_FAULT;
- }
- }
- } else if (type == ISIDE_TLB) {
-
- if (!vpsr.ic)
- misr.ni = 1;
-
-        /* Don't bother with PHY_D mode: it will require rr0+rr4 switches,
-           and it is certainly used only within the nested TLB handler
-           (hence TR mapped and ic=0). */
- if (!vpsr.dt)
- goto inject_itlb_fault;
-
- if (!vhpt_enabled(v, vadr, INST_REF)) {
- vcpu_set_isr(v, misr.val);
- alt_itlb(v, vadr);
- return IA64_FAULT;
- }
-
- vpta.val = vmx_vcpu_get_pta(v);
- if (vpta.vf) {
- /* Long format is not yet supported. */
- goto inject_itlb_fault;
- }
-
-
- vhpt_adr = vmx_vcpu_thash(v, vadr);
- if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
- /* VHPT successfully read. */
- if (pteval & _PAGE_P) {
- if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
- goto inject_itlb_fault;
- }
- vcpu_get_rr(v, vadr, &rr);
- itir = rr & (RR_RID_MASK | RR_PS_MASK);
- thash_purge_and_insert(v, pteval, itir, vadr, ISIDE_TLB);
- return IA64_NO_FAULT;
- } else {
- vcpu_set_isr(v, misr.val);
- inst_page_not_present(v, vadr);
- return IA64_FAULT;
- }
- } else {
- vcpu_set_isr(v, misr.val);
- ivhpt_fault(v, vadr);
- return IA64_FAULT;
- }
- }
- return IA64_NO_FAULT;
-
- inject_dtlb_fault:
- if (vpsr.ic) {
- vcpu_set_isr(v, misr.val);
- dtlb_fault(v, vadr);
- } else
- nested_dtlb(v);
-
- return IA64_FAULT;
-
- inject_itlb_fault:
- vcpu_set_isr(v, misr.val);
- itlb_fault(v, vadr);
- return IA64_FAULT;
-}
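The short-format VHPT recursion guard in vmx_hpw_miss() is compact; the sketch below (my reading, not original code) restates it in isolation. Shifting the XOR left by 3 discards the three region-number bits, and the right shift by vpta.size + 3 then asks whether the fault address and the PTA base agree on every bit above the VHPT size, i.e. whether vadr lands inside the guest VHPT itself:

    #include <stdint.h>

    /* Non-zero when vadr falls inside the short-format VHPT that starts
     * at pta_base and spans 1UL << pta_size bytes (bits 63:61 of both
     * addresses, the region-number bits, are discarded by the << 3). */
    static int addr_hits_guest_vhpt(uint64_t vadr, uint64_t pta_base,
                                    unsigned int pta_size)
    {
        return (((vadr ^ pta_base) << 3) >> (pta_size + 3)) == 0;
    }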
-
-void
-vmx_ia64_shadow_fault(u64 ifa, u64 isr, u64 mpa, REGS *regs)
-{
- struct vcpu *v = current;
- struct domain *d = v->domain;
- u64 gpfn, pte;
- thash_data_t *data;
-
- if (!shadow_mode_enabled(d))
- goto inject_dirty_bit;
-
- gpfn = get_gpfn_from_mfn(mpa >> PAGE_SHIFT);
- data = vhpt_lookup(ifa);
- if (data) {
- pte = data->page_flags;
- // BUG_ON((pte ^ mpa) & (_PAGE_PPN_MASK & PAGE_MASK));
- if (!(pte & _PAGE_VIRT_D))
- goto inject_dirty_bit;
- data->page_flags = pte | _PAGE_D;
- } else {
- data = vtlb_lookup(v, ifa, DSIDE_TLB);
- if (data) {
- if (!(data->page_flags & _PAGE_VIRT_D))
- goto inject_dirty_bit;
- }
- pte = 0;
- }
-
- /* Set the dirty bit in the bitmap. */
- shadow_mark_page_dirty(d, gpfn);
-
- /* Retry */
- atomic64_inc(&d->arch.shadow_fault_count);
- ia64_ptcl(ifa, PAGE_SHIFT << 2);
- return;
-
-inject_dirty_bit:
- /* Reflect. no need to purge. */
- VCPU(v, isr) = isr;
- set_ifa_itir_iha (v, ifa, 1, 1, 1);
- inject_guest_interruption(v, IA64_DIRTY_BIT_VECTOR);
- return;
-}
diff --git a/xen/arch/ia64/vmx/vmx_hypercall.c b/xen/arch/ia64/vmx/vmx_hypercall.c
deleted file mode 100644
index 2fde2f36eb..0000000000
--- a/xen/arch/ia64/vmx/vmx_hypercall.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_hypercall.c: handling hypercalls from a domain
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- */
-
-#include <xen/config.h>
-#include <xen/errno.h>
-#include <asm/vmx_vcpu.h>
-#include <xen/guest_access.h>
-#include <public/event_channel.h>
-#include <asm/vmmu.h>
-#include <asm/tlb.h>
-#include <asm/regionreg.h>
-#include <asm/page.h>
-#include <xen/mm.h>
-#include <xen/multicall.h>
-#include <xen/hypercall.h>
-#include <public/version.h>
-#include <asm/dom_fw.h>
-#include <xen/domain.h>
-#include <asm/vmx.h>
-#include <asm/viosapic.h>
-
-static int hvmop_set_isa_irq_level(
- XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
-{
- struct xen_hvm_set_isa_irq_level op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- if ( op.isa_irq > 15 )
- return -EINVAL;
-
- rc = rcu_lock_target_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = 0;
- viosapic_set_irq(d, op.isa_irq, op.level);
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
-static int hvmop_set_pci_intx_level(
- XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
-{
- struct xen_hvm_set_pci_intx_level op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
- return -EINVAL;
-
- rc = rcu_lock_target_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = 0;
- viosapic_set_pci_irq(d, op.device, op.intx, op.level);
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
-
-
-long
-do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
-{
- long rc = 0;
-
- switch (op) {
- case HVMOP_set_param:
- case HVMOP_get_param:
- {
- struct xen_hvm_param a;
- struct domain *d;
-
- if (copy_from_guest(&a, arg, 1))
- return -EFAULT;
-
- if (a.index >= HVM_NR_PARAMS)
- return -EINVAL;
-
- rc = rcu_lock_target_domain_by_id(a.domid, &d);
- if (rc != 0)
- return rc;
-
- if (op == HVMOP_set_param) {
- struct vmx_ioreq_page *iorp;
- struct vcpu *v;
-
- switch (a.index) {
- case HVM_PARAM_IOREQ_PFN:
- iorp = &d->arch.hvm_domain.ioreq;
- rc = vmx_set_ioreq_page(d, iorp, a.value);
- spin_lock(&iorp->lock);
- if (rc == 0 && iorp->va != NULL)
- /* Initialise evtchn port info if VCPUs already created. */
- for_each_vcpu(d, v)
- get_vio(v)->vp_eport = v->arch.arch_vmx.xen_port;
- spin_unlock(&iorp->lock);
- break;
- case HVM_PARAM_BUFIOREQ_PFN:
- iorp = &d->arch.hvm_domain.buf_ioreq;
- rc = vmx_set_ioreq_page(d, iorp, a.value);
- break;
- case HVM_PARAM_BUFPIOREQ_PFN:
- iorp = &d->arch.hvm_domain.buf_pioreq;
- rc = vmx_set_ioreq_page(d, iorp, a.value);
- break;
- case HVM_PARAM_DM_DOMAIN:
- if (a.value == DOMID_SELF)
- a.value = current->domain->domain_id;
- rc = a.value ? -EINVAL : 0; /* no stub domain support */
- break;
- default:
- /* nothing */
- break;
- }
- if (rc == 0)
- d->arch.hvm_domain.params[a.index] = a.value;
- }
- else {
- a.value = d->arch.hvm_domain.params[a.index];
- rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
- }
-
- rcu_unlock_domain(d);
- break;
- }
-
- case HVMOP_set_pci_intx_level:
- rc = hvmop_set_pci_intx_level(
- guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
- break;
-
- case HVMOP_set_isa_irq_level:
- rc = hvmop_set_isa_irq_level(
- guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
- break;
-
- case HVMOP_set_pci_link_route:
- rc = 0;
- break;
-
- case HVMOP_track_dirty_vram:
- rc = -ENOSYS;
- break;
-
- case HVMOP_modified_memory:
- {
- struct xen_hvm_modified_memory a;
- struct domain *d;
- unsigned long pfn;
-
- if ( copy_from_guest(&a, arg, 1) )
- return -EFAULT;
-
- rc = rcu_lock_target_domain_by_id(a.domid, &d);
- if ( rc != 0 )
- break;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto param_fail3;
-
- rc = -EINVAL;
- if ( a.first_pfn > domain_get_maximum_gpfn(d)
- || a.first_pfn + a.nr - 1 < a.first_pfn
- || a.first_pfn + a.nr - 1 > domain_get_maximum_gpfn(d))
- goto param_fail3;
-
- rc = 0;
- if ( !d->arch.shadow_bitmap )
- goto param_fail3;
-
- for (pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++)
- if (pfn < d->arch.shadow_bitmap_size)
- set_bit(pfn, d->arch.shadow_bitmap);
-
- param_fail3:
- rcu_unlock_domain(d);
- break;
- }
-
- case HVMOP_get_mem_type:
- case HVMOP_set_mem_type:
- case HVMOP_set_mem_access:
- case HVMOP_get_mem_access:
-
- rc = -ENOSYS;
- break;
-
- default:
- gdprintk(XENLOG_INFO, "Bad HVM op %ld.\n", op);
- rc = -ENOSYS;
- }
- return rc;
-}
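The HVMOP_modified_memory bounds test above reads densely; the standalone sketch below (names mine) restates it, with the middle conjunct rejecting wraparound of first_pfn + nr - 1:

    #include <stdint.h>

    static int pfn_range_valid(uint64_t first_pfn, uint64_t nr,
                               uint64_t max_gpfn)
    {
        return first_pfn <= max_gpfn &&
               first_pfn + nr - 1 >= first_pfn &&   /* no 64-bit wraparound */
               first_pfn + nr - 1 <= max_gpfn;
    }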
diff --git a/xen/arch/ia64/vmx/vmx_init.c b/xen/arch/ia64/vmx/vmx_init.c
deleted file mode 100644
index b4f52f15f3..0000000000
--- a/xen/arch/ia64/vmx/vmx_init.c
+++ /dev/null
@@ -1,661 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_init.c: initialization work for vt specific domain
- * Copyright (c) 2005, Intel Corporation.
- * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
- * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
- * Fred Yang <fred.yang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-/*
- * 05/08/16 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
- * Disable double mapping
- *
- * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
- * Simplified design in the first step:
- * - One virtual environment
- * - Domain is bound to one LP
- * Later to support guest SMP:
- * - Need interface to handle VP scheduled to different LP
- */
-#include <xen/config.h>
-#include <xen/types.h>
-#include <xen/sched.h>
-#include <asm/pal.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/vmx_vcpu.h>
-#include <xen/lib.h>
-#include <asm/vmmu.h>
-#include <public/xen.h>
-#include <public/hvm/ioreq.h>
-#include <public/event_channel.h>
-#include <public/arch-ia64/hvm/memmap.h>
-#include <asm/vmx_phy_mode.h>
-#include <asm/processor.h>
-#include <asm/vmx.h>
-#include <xen/mm.h>
-#include <asm/viosapic.h>
-#include <xen/event.h>
-#include <asm/vlsapic.h>
-#include <asm/vhpt.h>
-#include <asm/vmx_pal_vsa.h>
-#include <asm/patch.h>
-
-/* Global flag to identify whether Intel vmx feature is on */
-u32 vmx_enabled = 0;
-static u64 buffer_size;
-static u64 vp_env_info;
-static u64 vm_buffer = 0; /* Buffer required to bring up VMX feature */
-u64 __vsa_base = 0; /* Run-time service base of VMX */
-
-/* Check whether vt feature is enabled or not. */
-
-void vmx_vps_patch(void)
-{
- u64 addr;
-
- addr = (u64)&vmx_vps_sync_read;
- ia64_patch_imm64(addr, __vsa_base+PAL_VPS_SYNC_READ);
- ia64_fc((void *)addr);
- addr = (u64)&vmx_vps_sync_write;
- ia64_patch_imm64(addr, __vsa_base+PAL_VPS_SYNC_WRITE);
- ia64_fc((void *)addr);
- addr = (u64)&vmx_vps_resume_normal;
- ia64_patch_imm64(addr, __vsa_base+PAL_VPS_RESUME_NORMAL);
- ia64_fc((void *)addr);
- addr = (u64)&vmx_vps_resume_handler;
- ia64_patch_imm64(addr, __vsa_base+PAL_VPS_RESUME_HANDLER);
- ia64_fc((void *)addr);
- ia64_sync_i();
- ia64_srlz_i();
-}
-
-
-void
-identify_vmx_feature(void)
-{
- pal_status_t ret;
- u64 avail = 1, status = 1, control = 1;
-
- vmx_enabled = 0;
- /* Check VT-i feature */
- ret = ia64_pal_proc_get_features(&avail, &status, &control);
- if (ret != PAL_STATUS_SUCCESS) {
- printk("Get proc features failed.\n");
- goto no_vti;
- }
-
-    /* FIXME: do we need to check the status field, to see whether
-     * PSR.vm is actually enabled? If yes, another call to
-     * ia64_pal_proc_set_features may be required then.
- */
- printk("avail:0x%lx, status:0x%lx,control:0x%lx, vm?0x%lx\n",
- avail, status, control, avail & PAL_PROC_VM_BIT);
- if (!(avail & PAL_PROC_VM_BIT)) {
- printk("No VT feature supported.\n");
- goto no_vti;
- }
-
- ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
- if (ret != PAL_STATUS_SUCCESS) {
- printk("Get vp environment info failed.\n");
- goto no_vti;
- }
-
- printk("vm buffer size: %ld\n", buffer_size);
-
- vmx_enabled = 1;
-no_vti:
- return;
-}
-
-/*
- * ** This function must be called on every processor **
- *
- * Init the virtual environment on the current LP.
- * vsa_base indicates whether this is the first LP to be initialized
- * for the current domain.
- */
-void*
-vmx_init_env(void *start, unsigned long end_in_pa)
-{
- u64 status, tmp_base;
-
- if (!vm_buffer) {
-        /* VM buffer must be 4K aligned and
- * must be pinned by both itr and dtr. */
-#define VM_BUFFER_ALIGN (4 * 1024)
-#define VM_BUFFER_ALIGN_UP(x) (((x) + (VM_BUFFER_ALIGN - 1)) & \
- ~(VM_BUFFER_ALIGN - 1))
- unsigned long s_vm_buffer =
- VM_BUFFER_ALIGN_UP((unsigned long)start);
- unsigned long e_vm_buffer = s_vm_buffer + buffer_size;
- if (__pa(e_vm_buffer) < end_in_pa) {
- init_boot_pages(__pa(start), __pa(s_vm_buffer));
- start = (void*)e_vm_buffer;
- vm_buffer = virt_to_xenva(s_vm_buffer);
- printk("vm_buffer: 0x%lx\n", vm_buffer);
- } else {
- printk("Can't allocate vm_buffer "
- "start 0x%p end_in_pa 0x%lx "
- "buffer_size 0x%lx\n",
- start, end_in_pa, buffer_size);
- vmx_enabled = 0;
- return start;
- }
- }
-
- status=ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
- __pa(vm_buffer),
- vm_buffer,
- &tmp_base);
-
- if (status != PAL_STATUS_SUCCESS) {
- printk("ia64_pal_vp_init_env failed.\n");
- vmx_enabled = 0;
- return start;
- }
-
- if (!__vsa_base){
- __vsa_base = tmp_base;
- vmx_vps_patch();
- }
- else
- ASSERT(tmp_base == __vsa_base);
-
- return start;
-}
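VM_BUFFER_ALIGN_UP() above is the standard power-of-two round-up idiom, (x + a - 1) & ~(a - 1). A tiny self-check, illustrative only:

    #include <assert.h>

    #define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))  /* a: power of two */

    int main(void)
    {
        assert(ALIGN_UP(4096UL, 4096UL) == 4096UL);  /* already aligned */
        assert(ALIGN_UP(4097UL, 4096UL) == 8192UL);  /* next 4K boundary */
        return 0;
    }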
-
-typedef union {
- u64 value;
- struct {
- u64 number : 8;
- u64 revision : 8;
- u64 model : 8;
- u64 family : 8;
- u64 archrev : 8;
- u64 rv : 24;
- };
-} cpuid3_t;
-
-/* Allocate vpd from domheap */
-static vpd_t *alloc_vpd(void)
-{
- int i;
- cpuid3_t cpuid3;
- struct page_info *page;
- vpd_t *vpd;
- mapped_regs_t *mregs;
-
- page = alloc_domheap_pages(NULL, get_order(VPD_SIZE), 0);
- if (page == NULL) {
- printk("VPD allocation failed.\n");
- return NULL;
- }
- vpd = page_to_virt(page);
-
- printk(XENLOG_DEBUG "vpd base: 0x%p, vpd size:%ld\n",
- vpd, sizeof(vpd_t));
- memset(vpd, 0, VPD_SIZE);
- mregs = &vpd->vpd_low;
-
- /* CPUID init */
- for (i = 0; i < 5; i++)
- mregs->vcpuid[i] = ia64_get_cpuid(i);
-
- /* Limit the CPUID number to 5 */
- cpuid3.value = mregs->vcpuid[3];
- cpuid3.number = 4; /* 5 - 1 */
- mregs->vcpuid[3] = cpuid3.value;
-
- mregs->vac.a_from_int_cr = 1;
- mregs->vac.a_to_int_cr = 1;
- mregs->vac.a_from_psr = 1;
- mregs->vac.a_from_cpuid = 1;
- mregs->vac.a_cover = 1;
- mregs->vac.a_bsw = 1;
- mregs->vac.a_int = 1;
- mregs->vdc.d_vmsw = 1;
-
- return vpd;
-}
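The cpuid3_t bitfield write in alloc_vpd() ("Limit the CPUID number to 5") only touches the low byte of vcpuid[3]. Expressed with plain masks, as an equivalent sketch rather than original code:

    #include <stdint.h>

    /* Rewrite the 8-bit "number" field (bits 7:0) of CPUID register 3. */
    static uint64_t set_cpuid3_number(uint64_t cpuid3, uint64_t number)
    {
        return (cpuid3 & ~(uint64_t)0xff) | (number & 0xff);
    }

    /* alloc_vpd() effectively does:
     *   mregs->vcpuid[3] = set_cpuid3_number(mregs->vcpuid[3], 4);  // 5 - 1
     */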
-
-/* Free vpd to domheap */
-static void
-free_vpd(struct vcpu *v)
-{
- if ( v->arch.privregs )
- free_domheap_pages(virt_to_page(v->arch.privregs),
- get_order(VPD_SIZE));
-}
-
-// This is used for PAL_VP_CREATE and PAL_VPS_SET_PENDING_INTERRUPT
-// so that we don't have to pin the vpd down with itr[].
-void
-__vmx_vpd_pin(struct vcpu* v)
-{
- unsigned long privregs = (unsigned long)v->arch.privregs;
- u64 psr;
-
- privregs &= ~(IA64_GRANULE_SIZE - 1);
-
- // check overlapping with current stack
- if (privregs ==
- ((unsigned long)current & ~(IA64_GRANULE_SIZE - 1)))
- return;
-
- if (!VMX_DOMAIN(current)) {
- // check overlapping with vhpt
- if (privregs ==
- (vcpu_vhpt_maddr(current) & ~(IA64_GRANULE_SHIFT - 1)))
- return;
- } else {
- // check overlapping with vhpt
- if (privregs ==
- ((unsigned long)current->arch.vhpt.hash &
- ~(IA64_GRANULE_SHIFT - 1)))
- return;
-
- // check overlapping with privregs
- if (privregs ==
- ((unsigned long)current->arch.privregs &
- ~(IA64_GRANULE_SHIFT - 1)))
- return;
- }
-
- psr = ia64_clear_ic();
- ia64_ptr(0x2 /*D*/, privregs, IA64_GRANULE_SIZE);
- ia64_srlz_d();
- ia64_itr(0x2 /*D*/, IA64_TR_MAPPED_REGS, privregs,
- pte_val(pfn_pte(__pa(privregs) >> PAGE_SHIFT, PAGE_KERNEL)),
- IA64_GRANULE_SHIFT);
- ia64_set_psr(psr);
- ia64_srlz_d();
-}
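Each overlap test in __vmx_vpd_pin() compares granule-aligned bases, i.e. asks whether two addresses are covered by the same translation-register entry. A sketch of the common predicate (note that some branches above mask with IA64_GRANULE_SHIFT - 1 where IA64_GRANULE_SIZE - 1 appears to be intended):

    /* Non-zero when a and b fall inside the same granule-sized region
     * (granule must be a power of two, e.g. IA64_GRANULE_SIZE). */
    static inline int same_granule(unsigned long a, unsigned long b,
                                   unsigned long granule)
    {
        return (a & ~(granule - 1)) == (b & ~(granule - 1));
    }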
-
-void
-__vmx_vpd_unpin(struct vcpu* v)
-{
- if (!VMX_DOMAIN(current)) {
- int rc;
- rc = !set_one_rr(VRN7 << VRN_SHIFT, VCPU(current, rrs[VRN7]));
- BUG_ON(rc);
- } else {
- IA64FAULT fault;
- fault = vmx_vcpu_set_rr(current, VRN7 << VRN_SHIFT,
- VMX(current, vrr[VRN7]));
- BUG_ON(fault != IA64_NO_FAULT);
- }
-}
-
-/*
- * Create a VP on an initialized VMX environment.
- */
-static void
-vmx_create_vp(struct vcpu *v)
-{
- u64 ret;
- vpd_t *vpd = (vpd_t *)v->arch.privregs;
- u64 ivt_base;
- extern char vmx_ia64_ivt;
-    /* ia64_ivt is a function pointer, so this translation is needed */
- ivt_base = (u64) &vmx_ia64_ivt;
- printk(XENLOG_DEBUG "ivt_base: 0x%lx\n", ivt_base);
-
- vmx_vpd_pin(v);
- ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
- vmx_vpd_unpin(v);
-
- if (ret != PAL_STATUS_SUCCESS){
- panic_domain(vcpu_regs(v),"ia64_pal_vp_create failed. \n");
- }
-}
-
-/* Other non-context related tasks can be done in context switch */
-void
-vmx_save_state(struct vcpu *v)
-{
- BUG_ON(v != current);
-
- ia64_call_vsa(PAL_VPS_SAVE, (u64)v->arch.privregs, 1, 0, 0, 0, 0, 0);
-
-    /* Need to save the KRs on a domain switch, though the HV itself
-     * doesn't use them.
- */
- v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
- v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
- v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
- v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
- v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
- v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
- v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
- v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
-}
-
-/* Even if the guest is in physical mode, we still need such double mapping */
-void
-vmx_load_state(struct vcpu *v)
-{
- BUG_ON(v != current);
-
- vmx_load_all_rr(v);
-
- /* vmx_load_all_rr() pins down v->arch.privregs with both dtr/itr*/
- ia64_call_vsa(PAL_VPS_RESTORE, (u64)v->arch.privregs, 1, 0, 0, 0, 0, 0);
-
- ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
- ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
- ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
- ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
- ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
- ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
- ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
- ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
-    /* The guest vTLB is not required to be switched explicitly, since
-     * it is anchored in the vcpu */
-
- migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
-}
-
-static int
-vmx_vcpu_initialise(struct vcpu *v)
-{
- struct vmx_ioreq_page *iorp = &v->domain->arch.hvm_domain.ioreq;
-
- int rc = alloc_unbound_xen_event_channel(v, 0, NULL);
- if (rc < 0)
- return rc;
- v->arch.arch_vmx.xen_port = rc;
-
- spin_lock(&iorp->lock);
- if (v->domain->arch.vmx_platform.ioreq.va != 0)
- get_vio(v)->vp_eport = v->arch.arch_vmx.xen_port;
- spin_unlock(&iorp->lock);
-
- gdprintk(XENLOG_INFO, "Allocated port %ld for hvm %d vcpu %d.\n",
- v->arch.arch_vmx.xen_port, v->domain->domain_id, v->vcpu_id);
-
- return 0;
-}
-
-static int vmx_create_event_channels(struct vcpu *v)
-{
- struct vcpu *o;
-
- if (v->vcpu_id == 0) {
- /* Ugly: create event channels for every vcpu when vcpu 0
- starts, so that they're available for ioemu to bind to. */
- for_each_vcpu(v->domain, o) {
- int rc = vmx_vcpu_initialise(o);
- if (rc < 0) //XXX error recovery
- return rc;
- }
- }
-
- return 0;
-}
-
-/*
- * Event channels have been destroyed in domain_kill(), so we needn't
- * do anything here.
- */
-static void vmx_release_assist_channel(struct vcpu *v)
-{
- return;
-}
-
-/* The following three functions are based on hvm_xxx_ioreq_page()
- * in xen/arch/x86/hvm/hvm.c */
-static void vmx_init_ioreq_page(
- struct domain *d, struct vmx_ioreq_page *iorp)
-{
- memset(iorp, 0, sizeof(*iorp));
- spin_lock_init(&iorp->lock);
- domain_pause(d);
-}
-
-static void vmx_destroy_ioreq_page(
- struct domain *d, struct vmx_ioreq_page *iorp)
-{
- spin_lock(&iorp->lock);
-
- ASSERT(d->is_dying);
-
- if (iorp->va != NULL) {
- put_page(iorp->page);
- iorp->page = NULL;
- iorp->va = NULL;
- }
-
- spin_unlock(&iorp->lock);
-}
-
-int vmx_set_ioreq_page(
- struct domain *d, struct vmx_ioreq_page *iorp, unsigned long gpfn)
-{
- struct page_info *page;
- unsigned long mfn;
- pte_t pte;
-
- pte = *lookup_noalloc_domain_pte(d, gpfn << PAGE_SHIFT);
- if (!pte_present(pte) || !pte_mem(pte))
- return -EINVAL;
- mfn = pte_pfn(pte);
- ASSERT(mfn_valid(mfn));
-
- page = mfn_to_page(mfn);
- if (get_page(page, d) == 0)
- return -EINVAL;
-
- spin_lock(&iorp->lock);
-
- if ((iorp->va != NULL) || d->is_dying) {
- spin_unlock(&iorp->lock);
- put_page(page);
- return -EINVAL;
- }
-
- iorp->va = mfn_to_virt(mfn);
- iorp->page = page;
-
- spin_unlock(&iorp->lock);
-
- domain_unpause(d);
-
- return 0;
-}
-
-/*
- * Initialize the VMX environment for a guest. Only the 1st vp/vcpu
- * is registered here.
- */
-int
-vmx_final_setup_guest(struct vcpu *v)
-{
- vpd_t *vpd;
- int rc;
-
- vpd = alloc_vpd();
- ASSERT(vpd);
- if (!vpd)
- return -ENOMEM;
-
- v->arch.privregs = (mapped_regs_t *)vpd;
- vpd->vpd_low.virt_env_vaddr = vm_buffer;
-
-    /* Per-domain vTLB and VHPT implementation. For now a vmx domain
-     * sticks to this solution. Maybe it can be deferred until we know
-     * the domain was created as a vmx domain. */
- rc = init_domain_tlb(v);
- if (rc)
- return rc;
-
- if (!v->domain->arch.is_sioemu) {
- rc = vmx_create_event_channels(v);
- if (rc)
- return rc;
- }
-
- /* v->arch.schedule_tail = arch_vmx_do_launch; */
- vmx_create_vp(v);
-
- /* Physical mode emulation initialization, including
-     * emulation ID allocation and related memory requests
- */
- physical_mode_init(v);
-
- vlsapic_reset(v);
- vtm_init(v);
-
-    /* Set up the guest's indicator for a VTi domain */
- set_bit(ARCH_VMX_DOMAIN, &v->arch.arch_vmx.flags);
-
- return 0;
-}
-
-void
-vmx_relinquish_guest_resources(struct domain *d)
-{
- struct vcpu *v;
-
- if (d->arch.is_sioemu)
- return;
-
- for_each_vcpu(d, v)
- vmx_release_assist_channel(v);
-
- vacpi_relinquish_resources(d);
-
- vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.ioreq);
- vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
- vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
-}
-
-void
-vmx_relinquish_vcpu_resources(struct vcpu *v)
-{
- vtime_t *vtm = &(v->arch.arch_vmx.vtm);
-
- kill_timer(&vtm->vtm_timer);
-
- if (v->arch.arch_vmx.sioemu_info_mva)
- put_page(virt_to_page((unsigned long)
- v->arch.arch_vmx.sioemu_info_mva));
-
- free_domain_tlb(v);
- free_vpd(v);
-}
-
-typedef struct io_range {
- unsigned long start;
- unsigned long size;
- unsigned long type;
-} io_range_t;
-
-static const io_range_t io_ranges[] = {
- {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER << PAGE_SHIFT},
- {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO << PAGE_SHIFT},
- {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO << PAGE_SHIFT},
- {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC << PAGE_SHIFT},
- {PIB_START, PIB_SIZE, GPFN_PIB << PAGE_SHIFT},
-};
-
-// The P2M table is built in libxc/ia64/xc_ia64_hvm_build.c @ setup_guest()
-// so only mark IO memory space here
-static void vmx_build_io_physmap_table(struct domain *d)
-{
- unsigned long i, j;
-
- /* Mark I/O ranges */
- for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
- for (j = io_ranges[i].start;
- j < io_ranges[i].start + io_ranges[i].size; j += PAGE_SIZE)
- (void)__assign_domain_page(d, j, io_ranges[i].type,
- ASSIGN_writable | ASSIGN_io);
- }
-
-}
-
-int vmx_setup_platform(struct domain *d)
-{
- ASSERT(d != dom0); /* only for non-privileged vti domain */
-
- if (!d->arch.is_sioemu) {
- vmx_build_io_physmap_table(d);
-
- vmx_init_ioreq_page(d, &d->arch.vmx_platform.ioreq);
- vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
- vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
- }
- /* TEMP */
- d->arch.vmx_platform.pib_base = 0xfee00000UL;
-
- d->arch.sal_data = xmalloc(struct xen_sal_data);
- if (d->arch.sal_data == NULL)
- return -ENOMEM;
-
- /* Only open one port for I/O and interrupt emulation */
- memset(&d->shared_info->evtchn_mask[0], 0xff,
- sizeof(d->shared_info->evtchn_mask));
-
- /* Initialize iosapic model within hypervisor */
- viosapic_init(d);
-
- if (!d->arch.is_sioemu)
- vacpi_init(d);
-
- if (d->arch.is_sioemu) {
- int i;
- for (i = 1; i < XEN_LEGACY_MAX_VCPUS; i++)
- d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
- }
-
- return 0;
-}
-
-void vmx_do_resume(struct vcpu *v)
-{
- ioreq_t *p;
-
- vmx_load_state(v);
-
- if (v->domain->arch.is_sioemu)
- return;
-
- /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
- /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
- p = get_vio(v);
- while (p->state != STATE_IOREQ_NONE) {
- switch (p->state) {
- case STATE_IORESP_READY: /* IORESP_READY -> NONE */
- vmx_io_assist(v);
- break;
- case STATE_IOREQ_READY:
- case STATE_IOREQ_INPROCESS:
- /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
- wait_on_xen_event_channel(v->arch.arch_vmx.xen_port,
- (p->state != STATE_IOREQ_READY) &&
- (p->state != STATE_IOREQ_INPROCESS));
- break;
- default:
- gdprintk(XENLOG_ERR,
- "Weird HVM iorequest state %d.\n", p->state);
- domain_crash_synchronous();
- }
- }
-}
diff --git a/xen/arch/ia64/vmx/vmx_interrupt.c b/xen/arch/ia64/vmx/vmx_interrupt.c
deleted file mode 100644
index 83bb36a907..0000000000
--- a/xen/arch/ia64/vmx/vmx_interrupt.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_interrupt.c: handle inject interruption.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Shaofan Li (Susue Li) <susie.li@intel.com>
- * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- */
-#include <xen/types.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/vmx_mm_def.h>
-#include <asm/vmx_pal_vsa.h>
-#include <asm/debugger.h>
-
-/* SDM vol2 5.5 - IVA based interruption handling */
-#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
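The constant can be cross-checked against the architectural PSR bit positions. A quick self-check (bit numbers from the Itanium SDM; sketch only, not original code):

    #include <assert.h>
    #include <stdint.h>

    #define PSR_UP  (1UL << 2)
    #define PSR_MFL (1UL << 4)
    #define PSR_MFH (1UL << 5)
    #define PSR_PK  (1UL << 15)
    #define PSR_DT  (1UL << 17)
    #define PSR_RT  (1UL << 27)
    #define PSR_MC  (1UL << 35)
    #define PSR_IT  (1UL << 36)

    int main(void)
    {
        uint64_t kept = PSR_UP | PSR_MFL | PSR_MFH | PSR_PK |
                        PSR_DT | PSR_RT | PSR_MC | PSR_IT;
        assert(kept == 0x0000001808028034UL);
        return 0;
    }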
-
-static void
-collect_interruption(VCPU *vcpu)
-{
- u64 ipsr;
- u64 vdcr;
- u64 vifs;
- IA64_PSR vpsr;
- REGS * regs = vcpu_regs(vcpu);
- vpsr.val = vmx_vcpu_get_psr(vcpu);
- vcpu_bsw0(vcpu);
- if(vpsr.ic){
-
-        /* Sync the mpsr id/da/dd/ss/ed bits to vipsr,
-         * since after the guest does rfi we still want these bits set
-         * in mpsr
- */
-
- ipsr = regs->cr_ipsr;
- vpsr.val = vpsr.val | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
- | IA64_PSR_DD |IA64_PSR_SS |IA64_PSR_ED));
- vcpu_set_ipsr(vcpu, vpsr.val);
-
- /* Currently, for trap, we do not advance IIP to next
-         * instruction. That's because we assume the caller has already
-         * set up IIP correctly
- */
-
- vcpu_set_iip(vcpu , regs->cr_iip);
-
- /* set vifs.v to zero */
- vifs = VCPU(vcpu,ifs);
- vifs &= ~IA64_IFS_V;
- vcpu_set_ifs(vcpu, vifs);
-
- vcpu_set_iipa(vcpu, VMX(vcpu,cr_iipa));
- }
-
- vdcr = VCPU(vcpu,dcr);
-
- /* Set guest psr
-     * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged
- * be: set to the value of dcr.be
- * pp: set to the value of dcr.pp
- */
- vpsr.val &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
- vpsr.val |= ( vdcr & IA64_DCR_BE);
-
- /* VDCR pp bit position is different from VPSR pp bit */
- if ( vdcr & IA64_DCR_PP ) {
- vpsr.val |= IA64_PSR_PP;
- } else {
- vpsr.val &= ~IA64_PSR_PP;
- }
-
- vmx_vcpu_set_psr(vcpu, vpsr.val);
-
-}
-
-void
-inject_guest_interruption(VCPU *vcpu, u64 vec)
-{
- u64 viva;
- REGS *regs;
- ISR pt_isr;
-
- perfc_incra(vmx_inject_guest_interruption, vec >> 8);
-
- regs = vcpu_regs(vcpu);
-
- // clear cr.isr.ir (incomplete register frame)
- pt_isr.val = VMX(vcpu,cr_isr);
- pt_isr.ir = 0;
- VMX(vcpu,cr_isr) = pt_isr.val;
-
- collect_interruption(vcpu);
- vmx_ia64_set_dcr(vcpu);
-
- viva = vmx_vcpu_get_iva(vcpu);
- regs->cr_iip = viva + vec;
-
- debugger_event(vec == IA64_EXTINT_VECTOR ?
- XEN_IA64_DEBUG_ON_EXTINT : XEN_IA64_DEBUG_ON_EXCEPT);
-}
-
-void hvm_pci_intx_assert(
- struct domain *d, unsigned int device, unsigned int intx)
-{
- struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
- unsigned int gsi;
-
- ASSERT((device <= 31) && (intx <= 3));
-
- if ( __test_and_set_bit(device * 4 + intx, &hvm_irq->pci_intx.i) )
- return;
- gsi = hvm_pci_intx_gsi(device, intx);
- if ( ++hvm_irq->gsi_assert_count[gsi] == 1 )
- viosapic_set_irq(d, gsi, 1);
-}
-
-void hvm_pci_intx_deassert(
- struct domain *d, unsigned int device, unsigned int intx)
-{
- struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
- unsigned int gsi;
-
- ASSERT((device <= 31) && (intx <= 3));
-
- if ( !__test_and_clear_bit(device * 4 + intx, &hvm_irq->pci_intx.i) )
- return;
-
- gsi = hvm_pci_intx_gsi(device, intx);
-
- if (--hvm_irq->gsi_assert_count[gsi] == 0)
- viosapic_set_irq(d, gsi, 0);
-}
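hvm_pci_intx_assert()/hvm_pci_intx_deassert() above implement a simple shared-line reference count: the viosapic only sees a level change when the per-GSI count crosses between 0 and 1. Condensed into one sketch (names mine):

    /* Shared INTx line accounting: the wire state changes only when the
     * per-GSI assert count leaves or reaches zero. */
    static void intx_line_update(unsigned int *gsi_assert_count,
                                 unsigned int gsi, int assert,
                                 void (*set_irq)(unsigned int gsi, int level))
    {
        if (assert) {
            if (++gsi_assert_count[gsi] == 1)
                set_irq(gsi, 1);
        } else {
            if (--gsi_assert_count[gsi] == 0)
                set_irq(gsi, 0);
        }
    }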
-
-void hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq)
-{
- /* dummy */
-}
-
-void hvm_isa_irq_deassert(struct domain *d, unsigned int isa_irq)
-{
- /* dummy */
-}
-
-int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable)
-{
- /* dummy */
- return -ENOSYS;
-}
-
-void msixtbl_pt_unregister(struct domain *d, struct pirq *pirq)
-{
- /* dummy */
-}
diff --git a/xen/arch/ia64/vmx/vmx_ivt.S b/xen/arch/ia64/vmx/vmx_ivt.S
deleted file mode 100644
index 20ff402216..0000000000
--- a/xen/arch/ia64/vmx/vmx_ivt.S
+++ /dev/null
@@ -1,1364 +0,0 @@
-/*
- * arch/ia64/kernel/vmx_ivt.S
- *
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger <davidm@hpl.hp.com>
- * Copyright (C) 2000, 2002-2003 Intel Co
- * Asit Mallick <asit.k.mallick@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Kenneth Chen <kenneth.w.chen@intel.com>
- * Fenghua Yu <fenghua.yu@intel.com>
- *
- *
- * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
- * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
- *
- * 05/3/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- * Supporting Intel virtualization architecture
- *
- */
-
-/*
- * This file defines the interruption vector table used by the CPU.
- * It does not include one entry per possible cause of interruption.
- *
- * The first 20 entries of the table contain 64 bundles each while the
- * remaining 48 entries contain only 16 bundles each.
- *
- * The 64 bundles are used to allow inlining the whole handler for critical
- * interruptions like TLB misses.
- *
- * For each entry, the comment is as follows:
- *
- * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
- * entry offset ----/ / / / /
- * entry number ---------/ / / /
- * size of the entry -------------/ / /
- * vector name -------------------------------------/ /
- * interruptions triggering this vector ----------------------/
- *
- * The table is 32KB in size and must be aligned on 32KB boundary.
- * (The CPU ignores the 15 lower bits of the address)
- *
- * Table is based upon EAS2.6 (Oct 1999)
- */
-
-#include <linux/config.h>
-
-#include <asm/asmmacro.h>
-#include <asm/break.h>
-#include <asm/ia32.h>
-#include <asm/kregs.h>
-#include <asm/offsets.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/thread_info.h>
-#include <asm/unistd.h>
-#include <asm/vhpt.h>
-#include <asm/virt_event.h>
-#include <asm/vmx_phy_mode.h>
-#include <xen/errno.h>
-
-#if 1
-# define PSR_DEFAULT_BITS psr.ac
-#else
-# define PSR_DEFAULT_BITS 0
-#endif
-
-
-#ifdef VTI_DEBUG
-#define IVT_DEBUG_MASK (IVT_DEBUG_SIZE * (IVT_DEBUG_MAX - 1))
-#define VMX_DBG_FAULT(i) \
- mov r31=pr; \
- mov r20=cr.ipsr;; \
- tbit.z p6,p0=r20,IA64_PSR_VM_BIT;; \
-(p6)movl r21=THIS_CPU(cpu_kr)+ \
- IA64_KR_CURRENT_OFFSET;; \
-(p6)ld8 r21=[r21]; \
- mov pr=r31;; \
- add r16=IVT_CUR_OFS,r21; \
- add r17=IVT_DBG_OFS,r21;; \
- ld8 r18=[r16];; \
- add r17=r18,r17; \
- mov r19=cr.iip; \
- mov r22=cr.ifa; \
- mov r23=i;; \
- st8 [r17]=r19,8; \
- add r18=IVT_DEBUG_SIZE,r18;; \
- st8 [r17]=r20,8; \
- mov r19=IVT_DEBUG_MASK;; \
- st8 [r17]=r22,8; \
- and r18=r19,r18;; \
- st8 [r17]=r23; \
- st8 [r16]=r18;;
-#else
-# define VMX_DBG_FAULT(i)
-#endif
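In C terms, the VMX_DBG_FAULT() assembly above appends a fixed-size record (iip, ipsr, ifa, vector) to a per-vcpu ring buffer and advances the cursor modulo the ring size. A rough equivalent, with the record layout inferred from the four st8 stores; the names and #define values below are placeholders:

    /* Placeholder sizes: one record is four 8-byte stores = 32 bytes; the
     * ring must hold a power-of-two number of records for the mask to work. */
    #define IVT_DEBUG_SIZE 32
    #define IVT_DEBUG_MAX  1024
    #define IVT_DEBUG_MASK (IVT_DEBUG_SIZE * (IVT_DEBUG_MAX - 1))

    struct ivt_debug_rec {
        unsigned long iip, ipsr, ifa, vector;   /* stored in this order */
    };

    static void ivt_debug_log(char *buf, unsigned long *cur,
                              unsigned long iip, unsigned long ipsr,
                              unsigned long ifa, unsigned long vector)
    {
        struct ivt_debug_rec *rec = (struct ivt_debug_rec *)(buf + *cur);

        rec->iip = iip;
        rec->ipsr = ipsr;
        rec->ifa = ifa;
        rec->vector = vector;
        /* (cur + size) & mask wraps correctly because cur stays a multiple
         * of IVT_DEBUG_SIZE and IVT_DEBUG_MAX is a power of two. */
        *cur = (*cur + IVT_DEBUG_SIZE) & IVT_DEBUG_MASK;
    }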
-
-#include "vmx_minstate.h"
-
-#define MINSTATE_VIRT /* needed by minstate.h */
-#include "minstate.h"
-
-
-#define VMX_FAULT(n) \
-vmx_fault_##n:; \
- mov r19=n; \
- br.sptk.many dispatch_to_fault_handler; \
- ;;
-
-#define VMX_REFLECT(n) \
- mov r31=pr; \
- mov r19=n; /* prepare to save predicates */ \
- mov r29=cr.ipsr; \
- ;; \
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
-(p7)br.sptk.many vmx_dispatch_reflection; \
- br.sptk.many dispatch_to_fault_handler
-
-#ifdef CONFIG_VMX_PANIC
-GLOBAL_ENTRY(vmx_panic)
- br.sptk.many vmx_panic
- ;;
-END(vmx_panic)
-#endif
-
-
-
-
- .section .text.ivt,"ax"
-
- .align 32768 // align on 32KB boundary
- .global vmx_ia64_ivt
-vmx_ia64_ivt:
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
-ENTRY(vmx_vhpt_miss)
- VMX_DBG_FAULT(0)
- VMX_FAULT(0)
-END(vmx_vhpt_miss)
-
- .org vmx_ia64_ivt+0x400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
-ENTRY(vmx_itlb_miss)
- VMX_DBG_FAULT(1)
- mov r29=cr.ipsr
- mov r31 = pr
- ;;
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT
-(p6) br.sptk vmx_alt_itlb_miss_vmm
- mov r16 = cr.ifa
- ;;
- thash r17 = r16
- ttag r20 = r16
- ;;
- mov r18 = r17
- adds r28 = VLE_TITAG_OFFSET,r17
- adds r19 = VLE_CCHAIN_OFFSET, r17
- ;;
- ld8 r17 = [r19] // Read chain
- ;;
-vmx_itlb_loop:
- cmp.eq p6,p0 = r0, r17 // End of chain ?
-(p6)br vmx_itlb_out
- ;;
- adds r16 = VLE_TITAG_OFFSET, r17
- adds r19 = VLE_CCHAIN_OFFSET, r17
- ;;
- ld8 r24 = [r16] // Read tag
- ld8 r23 = [r19] // Read chain
- ;;
- lfetch [r23]
- cmp.eq p6,p7 = r20, r24 // does tag match ?
- ;;
-(p7)mov r17 = r23; // No: entry = chain
-(p7)br.sptk vmx_itlb_loop // again
- ;;
- // Swap the first entry with the entry found in the collision chain
- // to speed up next hardware search (and keep LRU).
- // In comments 1 stands for the first entry and 2 for the found entry.
- ld8 r29 = [r28] // Read tag of 1
- dep r22 = -1,r24,63,1 // set ti=1 of 2 (to disable it during the swap)
- ;;
- ld8 r25 = [r17] // Read value of 2
- ld8 r27 = [r18] // Read value of 1
- st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET // Write tag of 2
- st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET // Write tag of 1
- mf
- ;;
- ld8 r29 = [r16] // read itir of 2
- ld8 r22 = [r28] // read itir of 1
- st8 [r18] = r25 // Write value of 1
- st8 [r17] = r27 // Write value of 2
- ;;
- st8 [r16] = r22 // Write itir of 2
- st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET // write itir of 1
- ;;
- st8.rel [r28] = r24 // Write tag of 1 (with ti=0)
- // Insert the translation entry
- itc.i r25
- dv_serialize_data
- // Resume
- mov r17=cr.isr
- mov r23=r31
- mov r22=b0
- adds r16=IA64_VPD_BASE_OFFSET,r21
- ;;
- ld8 r18=[r16]
- ;;
- adds r19=VPD(VPSR),r18
- ;;
- ld8 r19=[r19]
- br.sptk ia64_vmm_entry
- ;;
-vmx_itlb_out:
- mov r19 = 1
- br.sptk vmx_dispatch_tlb_miss
- VMX_FAULT(1);
-END(vmx_itlb_miss)
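The block commented "Swap the first entry with the entry found in the collision chain" is a move-to-front so the hardware walker hits recently used translations first. A C rendering of the walk and swap (the vle layout mirrors the VLE_*_OFFSET labels but is otherwise hypothetical; the real code additionally sets ti=1 on the visible tag while the pair is inconsistent):

    struct vle {                 /* hypothetical mirror of a VHPT entry */
        unsigned long value;     /* translation, inserted with itc.i */
        unsigned long itir;
        unsigned long titag;     /* tag; bit 63 is the "ti" invalid bit */
        struct vle *cchain;      /* collision chain link */
    };

    /* Walk the chain off *head; on a hit, exchange the found entry's
     * contents with the head slot (approximate LRU) and return the pte. */
    static unsigned long vhpt_lookup_promote(struct vle *head,
                                             unsigned long tag, int *hit)
    {
        struct vle *e;
        unsigned long v, i, t;

        for (e = head->cchain; e != NULL; e = e->cchain)
            if (e->titag == tag)
                break;
        if (e == NULL) {
            *hit = 0;            /* miss: the asm branches to vmx_itlb_out */
            return 0;
        }

        v = head->value; i = head->itir; t = head->titag;
        head->value = e->value; head->itir = e->itir; head->titag = e->titag;
        e->value = v; e->itir = i; e->titag = t;

        *hit = 1;
        return head->value;      /* the translation that gets inserted */
    }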
-
- .org vmx_ia64_ivt+0x0800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
-ENTRY(vmx_dtlb_miss)
- VMX_DBG_FAULT(2)
- mov r29=cr.ipsr
- mov r31 = pr
- ;;
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT
-(p6)br.sptk vmx_alt_dtlb_miss_vmm
- mov r16 = cr.ifa
- ;;
- thash r17 = r16
- ttag r20 = r16
- ;;
- mov r18 = r17
- adds r28 = VLE_TITAG_OFFSET,r17
- adds r19 = VLE_CCHAIN_OFFSET, r17
- ;;
- ld8 r17 = [r19]
- ;;
-vmx_dtlb_loop:
- cmp.eq p6,p0 = r0, r17
-(p6)br vmx_dtlb_out
- ;;
- adds r16 = VLE_TITAG_OFFSET, r17
- adds r19 = VLE_CCHAIN_OFFSET, r17
- ;;
- ld8 r24 = [r16]
- ld8 r23 = [r19]
- ;;
- lfetch [r23]
- cmp.eq p6,p7 = r20, r24
- ;;
-(p7)mov r17 = r23;
-(p7)br.sptk vmx_dtlb_loop
- ;;
- ld8 r29 = [r28]
- dep r22 = -1,r24,63,1 //set ti=1
- ;;
- ld8 r25 = [r17]
- ld8 r27 = [r18]
- st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
- st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
- mf
- ;;
- ld8 r29 = [r16]
- ld8 r22 = [r28]
- st8 [r18] = r25
- st8 [r17] = r27
- ;;
- st8 [r16] = r22
- st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
- ;;
- st8.rel [r28] = r24
- itc.d r25
- dv_serialize_data
- mov r17=cr.isr
- mov r23=r31
- mov r22=b0
- adds r16=IA64_VPD_BASE_OFFSET,r21
- ;;
- ld8 r18=[r16]
- ;;
- adds r19=VPD(VPSR),r18
- ;;
- ld8 r19=[r19]
- br.sptk ia64_vmm_entry
- ;;
-vmx_dtlb_out:
- mov r19 = 2
- br.sptk vmx_dispatch_tlb_miss
- VMX_FAULT(2);
-END(vmx_dtlb_miss)
-
- .org vmx_ia64_ivt+0x0c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
-ENTRY(vmx_alt_itlb_miss)
- VMX_DBG_FAULT(3)
- mov r29=cr.ipsr
- mov r31 = pr
- adds r22=IA64_VCPU_MMU_MODE_OFFSET, r21
- ;;
- tbit.nz p7,p0=r29,IA64_PSR_VM_BIT
-(p7)br.spnt vmx_alt_itlb_miss_dom
-vmx_alt_itlb_miss_vmm:
- mov r16=cr.ifa // get address that caused the TLB miss
- movl r17=PAGE_KERNEL
- mov r24=cr.ipsr
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- ;;
- and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- extr.u r18=r16,XEN_VIRT_UC_BIT, 1 // extract UC bit
- ;;
- or r19=r17,r19 // insert PTE control bits into r19
- mov r20=IA64_GRANULE_SHIFT<<2
- ;;
- dep r19=r18,r19,4,1 // set bit 4 (uncached) if the access was to UC region
- mov cr.itir=r20
- ;;
- itc.i r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
- ;;
-vmx_alt_itlb_miss_dom:
- ld1 r23=[r22] // Load mmu_mode
- ;;
- cmp.eq p6,p7=VMX_MMU_PHY_D,r23
-(p7)br.sptk vmx_fault_3
- ;;
- mov r19=3
- br.sptk vmx_dispatch_tlb_miss
- VMX_FAULT(3);
-END(vmx_alt_itlb_miss)
-
-
- .org vmx_ia64_ivt+0x1000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
-ENTRY(vmx_alt_dtlb_miss)
- VMX_DBG_FAULT(4)
-    mov r29=cr.ipsr    //frametable_miss needs ipsr saved in r29.
- mov r31=pr
- adds r22=IA64_VCPU_MMU_MODE_OFFSET, r21
- ;;
- tbit.nz p7,p0=r29,IA64_PSR_VM_BIT
-(p7)br.spnt vmx_alt_dtlb_miss_dom
-vmx_alt_dtlb_miss_vmm:
- mov r16=cr.ifa // get address that caused the TLB miss
- ;;
-#ifdef CONFIG_VIRTUAL_FRAME_TABLE
- // Test for the address of virtual frame_table
- shr r22=r16,56;;
- cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
-(p8)br.cond.sptk frametable_miss ;; //Make sure ipsr is saved in r29
-#endif
- movl r17=PAGE_KERNEL
- mov r20=cr.isr
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- mov r24=cr.ipsr
- ;;
- and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
- tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
- tbit.nz p8,p0=r16,XEN_VIRT_UC_BIT // is Xen UC region?
- extr.u r23=r16,59,5 // iva fault address
- // 0xc0000000_00000000 >> 59 = 0x18 EFI UC address
- // 0xe0000000_00000000 >> 59 = 0x1c EFI address
-
- and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
- ;;
- cmp.eq.or p8,p0=0x18,r23 // Region 6 is UC for EFI
-(p9)cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
- dep r24=-1,r24,IA64_PSR_ED_BIT,1
- or r19=r19,r17 // insert PTE control bits into r19
- mov r20=IA64_GRANULE_SHIFT<<2
- ;;
-(p8)dep r19=-1,r19,4,1 // set bit 4 (uncached) if access to UC area
-
-(p6)mov cr.ipsr=r24
- mov cr.itir=r20
- ;;
-(p7)itc.d r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
- ;;
-vmx_alt_dtlb_miss_dom:
- ld1 r23=[r22] // Load mmu_mode
- ;;
- cmp.eq p6,p7=VMX_MMU_PHY_D,r23
-(p7)br.sptk vmx_fault_4
- ;;
- mov r19=4
- br.sptk vmx_dispatch_tlb_miss
- VMX_FAULT(4);
-END(vmx_alt_dtlb_miss)
-
- .org vmx_ia64_ivt+0x1400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
-ENTRY(vmx_nested_dtlb_miss)
- VMX_DBG_FAULT(5)
- mov r29=cr.ipsr
- mov b0=r30
- ;;
- tbit.z p6,p0=r29,IA64_PSR_VM_BIT
-(p6)br.sptk b0 // return to the continuation point
- VMX_FAULT(5)
-END(vmx_nested_dtlb_miss)
-
- .org vmx_ia64_ivt+0x1800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
-ENTRY(vmx_ikey_miss)
- VMX_DBG_FAULT(6)
- VMX_REFLECT(6)
-END(vmx_ikey_miss)
-
- .org vmx_ia64_ivt+0x1c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
-ENTRY(vmx_dkey_miss)
- VMX_DBG_FAULT(7)
- VMX_REFLECT(7)
-END(vmx_dkey_miss)
-
- .org vmx_ia64_ivt+0x2000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
-ENTRY(vmx_dirty_bit)
- VMX_DBG_FAULT(8)
- mov r28=cr.ipsr
- mov r31=pr
- ;;
- mov r19=cr.ifa
- tbit.z p6,p0=r28,IA64_PSR_VM_BIT
-(p6)br.spnt.few vmx_fault_8
- // Prepare for nested dtlb miss
- mov r22=b0
- dep.z r29=r28,IA64_PSR_VM_BIT,1
- ;;
- mov cr.ipsr=r29 // ipsr.vm=0
- movl r30=dirty_bit_tpa_fail
- ;;
- tpa r19=r19 // possibly nested dtlb miss?
- mov cr.ipsr=r28 // ipsr.vm=1
- br.sptk vmx_dispatch_shadow_fault
- VMX_FAULT(8)
-dirty_bit_tpa_fail:
- // Resume & Retry
- mov cr.ipsr=r28 // ipsr.vm=1
- mov r17=cr.isr
- mov r23=r31
-// mov r22=b0 // b0 is clobbered in vmx_nested_dtlb_miss
- adds r16=IA64_VPD_BASE_OFFSET,r21
- ;;
- ld8 r18=[r16]
- ;;
- adds r19=VPD(VPSR),r18
- ;;
- ld8 r19=[r19]
- br.sptk ia64_vmm_entry
- ;;
-END(vmx_dirty_bit)
-
- .org vmx_ia64_ivt+0x2400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
-ENTRY(vmx_iaccess_bit)
- VMX_DBG_FAULT(9)
- VMX_REFLECT(9)
-END(vmx_iaccess_bit)
-
- .org vmx_ia64_ivt+0x2800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
-ENTRY(vmx_daccess_bit)
- VMX_DBG_FAULT(10)
- VMX_REFLECT(10)
-END(vmx_daccess_bit)
-
- .org vmx_ia64_ivt+0x2c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
-ENTRY(vmx_break_fault)
- VMX_DBG_FAULT(11)
- mov r31=pr
- mov r19=11
- mov r17=cr.iim
- mov r29=cr.ipsr
- ;;
- tbit.z p6,p0=r29,IA64_PSR_VM_BIT
-(p6)br.sptk.many vmx_dispatch_break_fault /* make sure before access [r21] */
- adds r22=IA64_VCPU_BREAKIMM_OFFSET, r21
- ;;
- ld4 r22=[r22]
- extr.u r24=r29,IA64_PSR_CPL0_BIT,2
- cmp.ltu p6,p0=NR_hypercalls,r2
- ;;
- cmp.ne.or p6,p0=r22,r17
- cmp.ne.or p6,p0=r0,r24
-(p6) br.sptk.many vmx_dispatch_break_fault
- ;;
- /*
- * The streamlined system call entry/exit paths only save/restore the initial part
- * of pt_regs. This implies that the callers of system-calls must adhere to the
- * normal procedure calling conventions.
- *
- * Registers to be saved & restored:
- * CR registers: cr.ipsr, cr.iip, cr.ifs
- * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
- * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
- * Registers to be restored only:
- * r8-r11: output value from the system call.
- *
- * During system call exit, scratch registers (including r15) are modified/cleared
- * to prevent leaking bits from kernel to user level.
- */
-
- mov r14=r21 // save r21 before bsw.1
- bsw.1 // B (6 cyc) switch to bank 1
- ;;
- mov r29=cr.ipsr // M2 (12 cyc)
- mov r31=pr // I0 (2 cyc)
- mov r16=r14
- mov r15=r2
-
- mov r17=cr.iim // M2 (2 cyc)
- mov.m r27=ar.rsc // M2 (12 cyc)
-
- mov.m ar.rsc=0 // M2
- mov.m r21=ar.fpsr // M2 (12 cyc)
- mov r19=b6 // I0 (2 cyc)
- ;;
- mov.m r23=ar.bspstore // M2 (12 cyc)
- mov.m r24=ar.rnat // M2 (5 cyc)
- mov.i r26=ar.pfs // I0 (2 cyc)
-
- invala // M0|1
- nop.m 0 // M
- mov r20=r1 // A save r1
-
- nop.m 0
- movl r30=ia64_hypercall_table // X
-
- mov r28=cr.iip // M2 (2 cyc)
- //
- // From this point on, we are definitely on the syscall-path
- // and we can use (non-banked) scratch registers.
- //
-///////////////////////////////////////////////////////////////////////
- mov r1=r16 // A move task-pointer to "addl"-addressable reg
- mov r2=r16 // A setup r2 for ia64_syscall_setup
-
- mov r3=NR_hypercalls - 1
- ;;
- mov r9=r0 // force flags = 0
- extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
-
- shladd r30=r15,3,r30 // A r30 = hcall_table + 8*syscall
- addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
- cmp.leu p6,p7=r15,r3 // A syscall number in range?
- ;;
-
- lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
-(p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
- tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
-
- mov.m ar.bspstore=r22 // M2 switch to kernel RBS
- cmp.eq p8,p9=2,r8 // A isr.ei==2?
- ;;
-
-(p8) mov r8=0 // A clear ei to 0
-(p7) movl r30=do_ni_hypercall // X
-
-(p8) adds r28=16,r28 // A switch cr.iip to next bundle
-(p9) adds r8=1,r8 // A increment ei to next slot
- nop.i 0
- ;;
-
- mov.m r25=ar.unat // M2 (5 cyc)
- dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
- //
- // If any of the above loads miss in L1D, we'll stall here until
- // the data arrives.
- //
-///////////////////////////////////////////////////////////////////////
- mov b6=r30 // I0 setup syscall handler branch reg early
-
- mov r18=ar.bsp // M2 (12 cyc)
- ;;
- addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
- br.call.sptk.many b7=ia64_hypercall_setup // B
-1:
- mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
- ;;
- ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
- ;;
-
- srlz.i // M0 ensure interruption collection is on
-(p15) ssm psr.i // M2 restore psr.i
-	br.call.sptk.many b0=b6                     // B    invoke syscall-handler (ignore return addr)
- ;;
- //restore hypercall argument if continuation
- adds r2=IA64_VCPU_HYPERCALL_CONTINUATION_OFS,r13
- ;;
- ld1 r20=[r2]
- ;;
- st1 [r2]=r0
- cmp.ne p6,p0=r20,r0
- ;;
-(p6) adds r2=PT(R16)+16,r12
-(p6) adds r3=PT(R17)+16,r12
- ;;
-(p6) ld8 r32=[r2],16
-(p6) ld8 r33=[r3],16
- ;;
-(p6) ld8 r34=[r2],16
-(p6) ld8 r35=[r3],16
- ;;
-(p6) ld8 r36=[r2],16
- ;;
- br.sptk.many ia64_leave_hypercall
- ;;
-
- VMX_FAULT(11)
-END(vmx_break_fault)
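
In C terms, the continuation check after the handler returns amounts to
the following; a minimal reading of the assembly above, where the field
name hypercall_continuation and the helper reload_args_from_pt_regs()
are hypothetical, standing in for whatever
IA64_VCPU_HYPERCALL_CONTINUATION_OFS points at:

    if (v->arch.hypercall_continuation) {    /* ld1 r20=[r2] */
        v->arch.hypercall_continuation = 0;  /* st1 [r2]=r0  */
        /* reload the five arguments saved in pt_regs r16-r20
         * into the stacked registers r32-r36 */
        reload_args_from_pt_regs(v);
    }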
-
- .org vmx_ia64_ivt+0x3000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
-ENTRY(vmx_interrupt)
- VMX_DBG_FAULT(12)
- mov r31=pr // prepare to save predicates
- mov r19=12
- br.sptk vmx_dispatch_interrupt
-END(vmx_interrupt)
-
- .org vmx_ia64_ivt+0x3400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x3400 Entry 13 (size 64 bundles) Reserved
-ENTRY(vmx_virtual_exirq)
- VMX_DBG_FAULT(13)
- mov r31=pr
- mov r19=13
- br.sptk vmx_dispatch_vexirq
-END(vmx_virtual_exirq)
-
- .org vmx_ia64_ivt+0x3800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x3800 Entry 14 (size 64 bundles) Reserved
- VMX_DBG_FAULT(14)
- VMX_FAULT(14)
- // this code segment is from 2.6.16.13
-
- /*
- * There is no particular reason for this code to be here, other than that
- * there happens to be space here that would go unused otherwise. If this
- * fault ever gets "unreserved", simply move the following code to a more
- * suitable spot...
- *
- * ia64_hypercall_setup() is a separate subroutine so that it can
- * allocate stacked registers and safely demine any potential NaT
- * values from the input registers.
- *
- * On entry:
- * - executing on bank 0 or bank 1 register set (doesn't matter)
- * - r1: stack pointer
- * - r2: current task pointer
- * - r3: preserved
- * - r11: original contents (saved ar.pfs to be saved)
- * - r12: original contents (sp to be saved)
- * - r13: original contents (tp to be saved)
- * - r15: original contents (syscall # to be saved)
- * - r18: saved bsp (after switching to kernel stack)
- * - r19: saved b6
- * - r20: saved r1 (gp)
- * - r21: saved ar.fpsr
- * - r22: kernel's register backing store base (krbs_base)
- * - r23: saved ar.bspstore
- * - r24: saved ar.rnat
- * - r25: saved ar.unat
- * - r26: saved ar.pfs
- * - r27: saved ar.rsc
- * - r28: saved cr.iip
- * - r29: saved cr.ipsr
- * - r31: saved pr
- * - b0: original contents (to be saved)
- * On exit:
- * - p10: TRUE if syscall is invoked with more than 8 out
- *	  registers or r15's NaT bit is set
- * - r1: kernel's gp
- * - r3: preserved (same as on entry)
- * - r8: -EINVAL if p10 is true
- * - r12: points to kernel stack
- * - r13: points to current task
- * - r14: preserved (same as on entry)
- * - p13: preserved
- * - p15: TRUE if interrupts need to be re-enabled
- * - ar.fpsr: set to kernel settings
- * - b6: preserved (same as on entry)
- */
-ENTRY(ia64_hypercall_setup)
-#if PT(B6) != 0
-# error This code assumes that b6 is the first field in pt_regs.
-#endif
- st8 [r1]=r19 // save b6
- add r16=PT(CR_IPSR),r1 // initialize first base pointer
- add r17=PT(R11),r1 // initialize second base pointer
- ;;
- alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
- st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
- tnat.nz p8,p0=in0
-
- st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
- tnat.nz p9,p0=in1
-//(pKStk) mov r18=r0 // make sure r18 isn't NaT
- ;;
-
- st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
- st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
- mov r28=b0 // save b0 (2 cyc)
- ;;
-
- st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
- dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
-(p8) mov in0=-1
- ;;
-
- st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
- extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
- and r8=0x7f,r19 // A // get sof of ar.pfs
-
- st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
- tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
-(p9) mov in1=-1
- ;;
-
-//(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
- sub r18=r18,r22 // r18=RSE.ndirty*8
- tnat.nz p10,p0=in2
- add r11=8,r11
- ;;
-//(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
-//(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
- tnat.nz p11,p0=in3
- ;;
-(p10) mov in2=-1
- tnat.nz p12,p0=in4 // [I0]
-(p11) mov in3=-1
- ;;
-//(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
- st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
-//(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
- st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
- shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
- ;;
- st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
- st8 [r17]=r28,PT(R1)-PT(B0) // save b0
- tnat.nz p13,p0=in5 // [I0]
- ;;
- st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
- st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
-(p12) mov in4=-1
- ;;
-
-.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
-.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
-(p13) mov in5=-1
- ;;
- st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
- tnat.nz p13,p0=in6
- cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
- ;;
- mov r8=1
-(p9) tnat.nz p10,p0=r15
- adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
-
- st8.spill [r17]=r15 // save r15
- tnat.nz p8,p0=in7
- nop.i 0
-
- mov r13=r2 // establish `current'
- movl r1=__gp // establish kernel global pointer
- ;;
- st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
-(p13) mov in6=-1
-(p8) mov in7=-1
-
- cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
- movl r17=FPSR_DEFAULT
- ;;
- mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
-(p10) mov r8=-EINVAL
- br.ret.sptk.many b7
-END(ia64_hypercall_setup)
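
The pfm decode above is compact; in C it reads roughly as below. A
sketch assuming only the architected ar.pfs layout (sof in bits 0..6,
sol in bits 7..13, pfm occupying bits 0..37):

    unsigned long pfm = ar_pfs & 0x3fffffffffUL; /* dep r19=0,r19,38,26 */
    unsigned long sof = pfm & 0x7f;              /* and r8=0x7f,r19     */
    unsigned long sol = (pfm >> 7) & 0x7f;       /* extr.u r11=r19,7,7  */
    /* cmp.lt p10,p9=r11,r8 with r11 = sol+8: reject frames declaring
     * more than 8 out registers; p10 later forces r8 = -EINVAL */
    int too_many_outs = (sol + 8) < sof;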
-
-
- .org vmx_ia64_ivt+0x3c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x3c00 Entry 15 (size 64 bundles) Reserved
- VMX_DBG_FAULT(15)
- VMX_FAULT(15)
-
-
- .org vmx_ia64_ivt+0x4000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x4000 Entry 16 (size 64 bundles) Reserved
- VMX_DBG_FAULT(16)
- VMX_FAULT(16)
-
- .org vmx_ia64_ivt+0x4400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x4400 Entry 17 (size 64 bundles) Reserved
- VMX_DBG_FAULT(17)
- VMX_FAULT(17)
-
- .org vmx_ia64_ivt+0x4800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x4800 Entry 18 (size 64 bundles) Reserved
- VMX_DBG_FAULT(18)
- VMX_FAULT(18)
-
- .org vmx_ia64_ivt+0x4c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x4c00 Entry 19 (size 64 bundles) Reserved
- VMX_DBG_FAULT(19)
- VMX_FAULT(19)
-
- .org vmx_ia64_ivt+0x5000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5000 Entry 20 (size 16 bundles) Page Not Present
-ENTRY(vmx_page_not_present)
- VMX_DBG_FAULT(20)
- VMX_REFLECT(20)
-END(vmx_page_not_present)
-
- .org vmx_ia64_ivt+0x5100
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
-ENTRY(vmx_key_permission)
- VMX_DBG_FAULT(21)
- VMX_REFLECT(21)
-END(vmx_key_permission)
-
- .org vmx_ia64_ivt+0x5200
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
-ENTRY(vmx_iaccess_rights)
- VMX_DBG_FAULT(22)
- VMX_REFLECT(22)
-END(vmx_iaccess_rights)
-
- .org vmx_ia64_ivt+0x5300
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
-ENTRY(vmx_daccess_rights)
- VMX_DBG_FAULT(23)
- VMX_REFLECT(23)
-END(vmx_daccess_rights)
-
- .org vmx_ia64_ivt+0x5400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
-ENTRY(vmx_general_exception)
- VMX_DBG_FAULT(24)
- VMX_REFLECT(24)
-// VMX_FAULT(24)
-END(vmx_general_exception)
-
- .org vmx_ia64_ivt+0x5500
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
-ENTRY(vmx_disabled_fp_reg)
- VMX_DBG_FAULT(25)
- VMX_REFLECT(25)
-END(vmx_disabled_fp_reg)
-
- .org vmx_ia64_ivt+0x5600
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
-ENTRY(vmx_nat_consumption)
- VMX_DBG_FAULT(26)
- VMX_REFLECT(26)
-END(vmx_nat_consumption)
-
- .org vmx_ia64_ivt+0x5700
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
-ENTRY(vmx_speculation_vector)
- VMX_DBG_FAULT(27)
- VMX_REFLECT(27)
-END(vmx_speculation_vector)
-
- .org vmx_ia64_ivt+0x5800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5800 Entry 28 (size 16 bundles) Reserved
- VMX_DBG_FAULT(28)
- VMX_FAULT(28)
-
- .org vmx_ia64_ivt+0x5900
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
-ENTRY(vmx_debug_vector)
- VMX_DBG_FAULT(29)
- VMX_REFLECT(29)
-END(vmx_debug_vector)
-
- .org vmx_ia64_ivt+0x5a00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
-ENTRY(vmx_unaligned_access)
- VMX_DBG_FAULT(30)
- VMX_REFLECT(30)
-END(vmx_unaligned_access)
-
- .org vmx_ia64_ivt+0x5b00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
-ENTRY(vmx_unsupported_data_reference)
- VMX_DBG_FAULT(31)
- VMX_REFLECT(31)
-END(vmx_unsupported_data_reference)
-
- .org vmx_ia64_ivt+0x5c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
-ENTRY(vmx_floating_point_fault)
- VMX_DBG_FAULT(32)
- VMX_REFLECT(32)
-END(vmx_floating_point_fault)
-
- .org vmx_ia64_ivt+0x5d00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
-ENTRY(vmx_floating_point_trap)
- VMX_DBG_FAULT(33)
- VMX_REFLECT(33)
-END(vmx_floating_point_trap)
-
- .org vmx_ia64_ivt+0x5e00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
-ENTRY(vmx_lower_privilege_trap)
- VMX_DBG_FAULT(34)
- VMX_REFLECT(34)
-END(vmx_lower_privilege_trap)
-
- .org vmx_ia64_ivt+0x5f00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
-ENTRY(vmx_taken_branch_trap)
- VMX_DBG_FAULT(35)
- VMX_REFLECT(35)
-END(vmx_taken_branch_trap)
-
- .org vmx_ia64_ivt+0x6000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
-ENTRY(vmx_single_step_trap)
- VMX_DBG_FAULT(36)
- VMX_REFLECT(36)
-END(vmx_single_step_trap)
-
- .global vmx_virtualization_fault_back
- .org vmx_ia64_ivt+0x6100
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
-ENTRY(vmx_virtualization_fault)
-// VMX_DBG_FAULT(37)
- mov r31=pr
-#ifndef CONFIG_XEN_IA64_DISABLE_OPTVFAULT
- movl r30 = virtualization_fault_table
- mov r23=b0
- ;;
- shladd r30=r24,4,r30
- ;;
- mov b0=r30
- br.sptk.many b0
- ;;
-#endif
-vmx_virtualization_fault_back:
- mov r19=37
- adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
- adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
- ;;
- st8 [r16] = r24
- st8 [r17] = r25
- br.sptk vmx_dispatch_virtualization_fault
-END(vmx_virtualization_fault)
-
- .org vmx_ia64_ivt+0x6200
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6200 Entry 38 (size 16 bundles) Reserved
- VMX_DBG_FAULT(38)
- VMX_FAULT(38)
-
- .org vmx_ia64_ivt+0x6300
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6300 Entry 39 (size 16 bundles) Reserved
- VMX_DBG_FAULT(39)
- VMX_FAULT(39)
-
- .org vmx_ia64_ivt+0x6400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6400 Entry 40 (size 16 bundles) Reserved
- VMX_DBG_FAULT(40)
- VMX_FAULT(40)
-
- .org vmx_ia64_ivt+0x6500
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6500 Entry 41 (size 16 bundles) Reserved
- VMX_DBG_FAULT(41)
- VMX_FAULT(41)
-
- .org vmx_ia64_ivt+0x6600
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6600 Entry 42 (size 16 bundles) Reserved
- VMX_DBG_FAULT(42)
- VMX_FAULT(42)
-
- .org vmx_ia64_ivt+0x6700
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6700 Entry 43 (size 16 bundles) Reserved
- VMX_DBG_FAULT(43)
- VMX_FAULT(43)
-
- .org vmx_ia64_ivt+0x6800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6800 Entry 44 (size 16 bundles) Reserved
- VMX_DBG_FAULT(44)
- VMX_FAULT(44)
-
- .org vmx_ia64_ivt+0x6900
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
-ENTRY(vmx_ia32_exception)
- VMX_DBG_FAULT(45)
- VMX_FAULT(45)
-END(vmx_ia32_exception)
-
- .org vmx_ia64_ivt+0x6a00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
-ENTRY(vmx_ia32_intercept)
- VMX_DBG_FAULT(46)
- VMX_FAULT(46)
-END(vmx_ia32_intercept)
-
- .org vmx_ia64_ivt+0x6b00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
-ENTRY(vmx_ia32_interrupt)
- VMX_DBG_FAULT(47)
- VMX_FAULT(47)
-END(vmx_ia32_interrupt)
-
- .org vmx_ia64_ivt+0x6c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6c00 Entry 48 (size 16 bundles) Reserved
- VMX_DBG_FAULT(48)
- VMX_FAULT(48)
-
- .org vmx_ia64_ivt+0x6d00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6d00 Entry 49 (size 16 bundles) Reserved
- VMX_DBG_FAULT(49)
- VMX_FAULT(49)
-
- .org vmx_ia64_ivt+0x6e00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6e00 Entry 50 (size 16 bundles) Reserved
- VMX_DBG_FAULT(50)
- VMX_FAULT(50)
-
- .org vmx_ia64_ivt+0x6f00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6f00 Entry 51 (size 16 bundles) Reserved
- VMX_DBG_FAULT(51)
- VMX_FAULT(51)
-
- .org vmx_ia64_ivt+0x7000
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7000 Entry 52 (size 16 bundles) Reserved
- VMX_DBG_FAULT(52)
- VMX_FAULT(52)
-
- .org vmx_ia64_ivt+0x7100
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7100 Entry 53 (size 16 bundles) Reserved
- VMX_DBG_FAULT(53)
- VMX_FAULT(53)
-
- .org vmx_ia64_ivt+0x7200
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7200 Entry 54 (size 16 bundles) Reserved
- VMX_DBG_FAULT(54)
- VMX_FAULT(54)
-
- .org vmx_ia64_ivt+0x7300
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7300 Entry 55 (size 16 bundles) Reserved
- VMX_DBG_FAULT(55)
- VMX_FAULT(55)
-
- .org vmx_ia64_ivt+0x7400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7400 Entry 56 (size 16 bundles) Reserved
- VMX_DBG_FAULT(56)
- VMX_FAULT(56)
-
- .org vmx_ia64_ivt+0x7500
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7500 Entry 57 (size 16 bundles) Reserved
- VMX_DBG_FAULT(57)
- VMX_FAULT(57)
-
- .org vmx_ia64_ivt+0x7600
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7600 Entry 58 (size 16 bundles) Reserved
- VMX_DBG_FAULT(58)
- VMX_FAULT(58)
-
- .org vmx_ia64_ivt+0x7700
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7700 Entry 59 (size 16 bundles) Reserved
- VMX_DBG_FAULT(59)
- VMX_FAULT(59)
-
- .org vmx_ia64_ivt+0x7800
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7800 Entry 60 (size 16 bundles) Reserved
- VMX_DBG_FAULT(60)
- VMX_FAULT(60)
-
- .org vmx_ia64_ivt+0x7900
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7900 Entry 61 (size 16 bundles) Reserved
- VMX_DBG_FAULT(61)
- VMX_FAULT(61)
-
- .org vmx_ia64_ivt+0x7a00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7a00 Entry 62 (size 16 bundles) Reserved
- VMX_DBG_FAULT(62)
- VMX_FAULT(62)
-
- .org vmx_ia64_ivt+0x7b00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7b00 Entry 63 (size 16 bundles) Reserved
- VMX_DBG_FAULT(63)
- VMX_FAULT(63)
-
- .org vmx_ia64_ivt+0x7c00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7c00 Entry 64 (size 16 bundles) Reserved
- VMX_DBG_FAULT(64)
- VMX_FAULT(64)
-
- .org vmx_ia64_ivt+0x7d00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7d00 Entry 65 (size 16 bundles) Reserved
- VMX_DBG_FAULT(65)
- VMX_FAULT(65)
-
- .org vmx_ia64_ivt+0x7e00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7e00 Entry 66 (size 16 bundles) Reserved
- VMX_DBG_FAULT(66)
- VMX_FAULT(66)
-
- .org vmx_ia64_ivt+0x7f00
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x7f00 Entry 67 (size 16 bundles) Reserved
- VMX_DBG_FAULT(67)
- VMX_FAULT(67)
-
- .org vmx_ia64_ivt+0x8000
-// There is no particular reason for this code to be here, other than that
-// there happens to be space here that would go unused otherwise. If this
-// fault ever gets "unreserved", simply move the following code to a more
-// suitable spot...
-
-
-ENTRY(vmx_dispatch_reflection)
- /*
- * Input:
- * psr.ic: off
- * r19: intr type (offset into ivt, see ia64_int.h)
- * r31: contains saved predicates (pr)
- */
- VMX_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,5,0
- mov out0=cr.ifa
- mov out1=cr.isr
- mov out2=cr.iim
- mov out3=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- movl r14=ia64_leave_hypervisor
- ;;
- VMX_SAVE_REST
- mov rp=r14
- ;;
- P6_BR_CALL_PANIC(.Lvmx_dispatch_reflection_string)
- adds out4=16,r12
- br.call.sptk.many b6=vmx_reflect_interruption
-END(vmx_dispatch_reflection)
-
-ENTRY(vmx_dispatch_virtualization_fault)
- VMX_SAVE_MIN_WITH_COVER
- ;;
- alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
- mov out0=r13 //vcpu
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- movl r14=ia64_leave_hypervisor_prepare
- ;;
- VMX_SAVE_REST
- VMX_SAVE_EXTRA
- mov rp=r14
- ;;
- P6_BR_CALL_PANIC(.Lvmx_dispatch_virtualization_fault_string)
- adds out1=16,sp //regs
- br.call.sptk.many b6=vmx_emulate
-END(vmx_dispatch_virtualization_fault)
-
-
-GLOBAL_ENTRY(vmx_dispatch_vexirq)
- VMX_SAVE_MIN_WITH_COVER
- alloc r14=ar.pfs,0,0,1,0
- mov out0=r13
-
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer
- ;;
- VMX_SAVE_REST
- movl r14=ia64_leave_hypervisor
- ;;
- mov rp=r14
- P6_BR_CALL_PANIC(.Lvmx_dispatch_vexirq_string)
- br.call.sptk.many b6=vmx_vexirq
-END(vmx_dispatch_vexirq)
-
-ENTRY(vmx_dispatch_tlb_miss)
- VMX_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,3,0
- mov out0=cr.ifa
- mov out1=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- movl r14=ia64_leave_hypervisor
- ;;
- VMX_SAVE_REST
- mov rp=r14
- ;;
- P6_BR_CALL_PANIC(.Lvmx_dispatch_tlb_miss_string)
- adds out2=16,r12
- br.call.sptk.many b6=vmx_hpw_miss
-END(vmx_dispatch_tlb_miss)
-
-ENTRY(vmx_dispatch_break_fault)
- VMX_SAVE_MIN_WITH_COVER_NO_PANIC
- ;;
- alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
- mov out0=cr.ifa
- mov out2=cr.isr // FIXME: pity to make this slow access twice
- mov out3=cr.iim // FIXME: pity to make this slow access twice
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15)ssm psr.i // restore psr.i
-(pUStk)movl r14=ia64_leave_hypervisor
- ;;
-(pKStk)movl r14=ia64_leave_nested
- VMX_SAVE_REST
- mov rp=r14
- ;;
- adds out1=16,sp
- br.call.sptk.many b6=vmx_ia64_handle_break
- ;;
-END(vmx_dispatch_break_fault)
-
-
-ENTRY(vmx_dispatch_interrupt)
- VMX_SAVE_MIN_WITH_COVER_NO_PANIC // uses r31; defines r2 and r3
- ;;
- alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
- ssm psr.ic
- mov out0=cr.ivr // pass cr.ivr as first arg
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- ;;
-(pUStk) movl r14=ia64_leave_hypervisor
- srlz.i
- ;;
-(pKStk) movl r14=ia64_leave_nested
- VMX_SAVE_REST
- add out1=16,sp // pass pointer to pt_regs as second arg
- mov rp=r14
- br.call.sptk.many b6=ia64_handle_irq
-END(vmx_dispatch_interrupt)
-
-
-ENTRY(vmx_dispatch_shadow_fault)
- VMX_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,4,0
- mov out0=cr.ifa
- mov out1=cr.isr
- mov out2=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- movl r14=ia64_leave_hypervisor
- ;;
- VMX_SAVE_REST
- mov rp=r14
- ;;
- P6_BR_CALL_PANIC(.Lvmx_dispatch_shadow_fault_string)
- adds out3=16,r12
- br.call.sptk.many b6=vmx_ia64_shadow_fault
-END(vmx_dispatch_shadow_fault)
-
- .section .rodata, "a"
-.Lvmx_dispatch_reflection_string:
- .asciz "vmx_dispatch_reflection\n"
-.Lvmx_dispatch_virtualization_fault_string:
- .asciz "vmx_dispatch_virtualization_fault\n"
-.Lvmx_dispatch_vexirq_string:
- .asciz "vmx_dispatch_vexirq\n"
-.Lvmx_dispatch_tlb_miss_string:
- .asciz "vmx_dispatch_tlb_miss\n"
-.Lvmx_dispatch_shadow_fault_string:
- .asciz "vmx_dispatch_shadow_fault\n"
- .previous
diff --git a/xen/arch/ia64/vmx/vmx_minstate.h b/xen/arch/ia64/vmx/vmx_minstate.h
deleted file mode 100644
index b6b029d3c9..0000000000
--- a/xen/arch/ia64/vmx/vmx_minstate.h
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * vmx_minstate.h:
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- */
-
-#include <linux/config.h>
-
-#include <asm/asmmacro.h>
-#include <asm/fpu.h>
-#include <asm/mmu_context.h>
-#include <asm/offsets.h>
-#include <asm/pal.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/vmx_pal_vsa.h>
-#include <asm/vmx_vpd.h>
-#include <asm/cache.h>
-#include "entry.h"
-
-#define VMX_MINSTATE_START_SAVE_MIN \
-(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
- ;; \
-(pUStk) mov.m r28=ar.rnat; \
-(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
-(pKStk) mov r1=sp; /* get sp */ \
- ;; \
-(pUStk) lfetch.fault.excl.nt1 [r22]; \
-(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
- ;; \
-(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
-(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
- ;; \
-(pUStk) mov r18=ar.bsp; \
-(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
-
-#define VMX_MINSTATE_END_SAVE_MIN \
- bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
- ;;
-
-#define PAL_VSA_SYNC_READ \
- /* begin to call pal vps sync_read */ \
-{ .mii; \
-(pUStk) add r25=IA64_VPD_BASE_OFFSET, r21; \
-(pUStk) nop 0x0; \
-(pUStk) mov r24=ip; \
- ;; \
-}; \
-{ .mmb; \
-(pUStk) add r24 = 0x20, r24; \
-(pUStk) ld8 r25=[r25]; /* read vpd base */ \
-(pUStk) br.cond.sptk vmx_vps_sync_read; /* call the service */ \
- ;; \
-};
-
-#define IA64_CURRENT_REG IA64_KR(CURRENT) /* r21 is reserved for current pointer */
-//#define VMX_MINSTATE_GET_CURRENT(reg) mov reg=IA64_CURRENT_REG
-#define VMX_MINSTATE_GET_CURRENT(reg) mov reg=r21
-
-/*
- * VMX_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
- * the minimum state necessary that allows us to turn psr.ic back
- * on.
- *
- * Assumed state upon entry:
- * psr.ic: off
- * r31: contains saved predicates (pr)
- *
- * Upon exit, the state is as follows:
- * psr.ic: off
- * r2 = points to &pt_regs.r16
- * r8 = contents of ar.ccv
- * r9 = contents of ar.csd
- * r10 = contents of ar.ssd
- * r11 = FPSR_DEFAULT
- * r12 = kernel sp (kernel virtual address)
- * r13 = points to current task_struct (kernel virtual address)
- * p6 = (psr.vm || isr.ni)
- * panic if not external interrupt (fault in xen VMM)
- * p15 = TRUE if psr.i is set in cr.ipsr
- * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
- * preserved
- *
- * Note that psr.ic is NOT turned on by this macro. This is so that
- * we can pass interruption state as arguments to a handler.
- */
-
-#ifdef CONFIG_VMX_PANIC
-# define P6_BR_VMX_PANIC (p6)br.spnt.few vmx_panic;
-#else
-# define P6_BR_VMX_PANIC /* nothing */
-#endif
-
-#define P6_BR_CALL_PANIC(panic_string) \
-(p6) movl out0=panic_string; \
-(p6) br.call.spnt.few b6=panic;
-
-#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,VMX_PANIC) \
- mov r27=ar.rsc; /* M */ \
- mov r20=r1; /* A */ \
- mov r25=ar.unat; /* M */ \
- mov r29=cr.ipsr; /* M */ \
- mov r26=ar.pfs; /* I */ \
- mov r18=cr.isr; \
- COVER; /* B;; (or nothing) */ \
- ;; \
- cmp.eq p6,p0=r0,r0; \
- tbit.z pKStk,pUStk=r29,IA64_PSR_VM_BIT; \
- tbit.z p0,p15=r29,IA64_PSR_I_BIT; \
- ;; \
-(pUStk) tbit.nz.and p6,p0=r18,IA64_ISR_NI_BIT; \
-(pUStk)VMX_MINSTATE_GET_CURRENT(r1); \
- VMX_PANIC \
- /* switch from user to kernel RBS: */ \
- ;; \
- invala; /* M */ \
- SAVE_IFS; \
- ;; \
- VMX_MINSTATE_START_SAVE_MIN \
- adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
- adds r16=PT(CR_IPSR),r1; \
- ;; \
- lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
- st8 [r16]=r29; /* save cr.ipsr */ \
- ;; \
- lfetch.fault.excl.nt1 [r17]; \
- mov r29=b0 \
- ;; \
- adds r16=PT(R8),r1; /* initialize first base pointer */ \
- adds r17=PT(R9),r1; /* initialize second base pointer */ \
-(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r8,16; \
-.mem.offset 8,0; st8.spill [r17]=r9,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r10,24; \
-.mem.offset 8,0; st8.spill [r17]=r11,24; \
- ;; \
- mov r9=cr.iip; /* M */ \
- mov r10=ar.fpsr; /* M */ \
- ;; \
- st8 [r16]=r9,16; /* save cr.iip */ \
- st8 [r17]=r30,16; /* save cr.ifs */ \
-(pUStk) sub r18=r18,r22;/* r18=RSE.ndirty*8 */ \
- ;; \
- st8 [r16]=r25,16; /* save ar.unat */ \
- st8 [r17]=r26,16; /* save ar.pfs */ \
- shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
- ;; \
- st8 [r16]=r27,16; /* save ar.rsc */ \
-(pUStk) st8 [r17]=r28,16;/* save ar.rnat */ \
-(pKStk) adds r17=16,r17;/* skip over ar_rnat field */ \
- ;; /* avoid RAW on r16 & r17 */ \
-(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
- st8 [r17]=r31,16; /* save predicates */ \
-(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
- ;; \
- st8 [r16]=r29,16; /* save b0 */ \
- st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
- cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
-.mem.offset 8,0; st8.spill [r17]=r12,16; \
- adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r13,16; \
-.mem.offset 8,0; st8.spill [r17]=r10,16; /* save ar.fpsr */ \
-(pUStk) VMX_MINSTATE_GET_CURRENT(r13); /* establish `current' */ \
-(pKStk) movl r13=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;/* From MINSTATE_GET_CURRENT */\
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r15,16; \
-.mem.offset 8,0; st8.spill [r17]=r14,16; \
-(pKStk) ld8 r13=[r13]; /* establish `current' */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16]=r2,16; \
-.mem.offset 8,0; st8.spill [r17]=r3,16; \
- adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
- ;; \
-(pUStk) adds r16=IA64_VCPU_IIPA_OFFSET,r13; \
-(pUStk) adds r17=IA64_VCPU_ISR_OFFSET,r13; \
-(pUStk) mov r26=cr.iipa; \
-(pUStk) mov r27=cr.isr; \
- ;; \
-(pUStk) st8 [r16]=r26; \
-(pUStk) st8 [r17]=r27; \
- ;; \
- EXTRA; \
- mov r8=ar.ccv; \
- mov r9=ar.csd; \
- mov r10=ar.ssd; \
- movl r11=FPSR_DEFAULT; /* L-unit */ \
- movl r1=__gp; /* establish kernel global pointer */ \
- ;; \
- PAL_VSA_SYNC_READ \
- VMX_MINSTATE_END_SAVE_MIN
-
-/*
- * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
- *
- * Assumed state upon entry:
- * psr.ic: on
- * r2: points to &pt_regs.f6
- * r3: points to &pt_regs.f7
- * r8: contents of ar.ccv
- * r9: contents of ar.csd
- * r10: contents of ar.ssd
- * r11: FPSR_DEFAULT
- *
- * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
- */
-#define VMX_SAVE_REST \
-.mem.offset 0,0; st8.spill [r2]=r16,16; \
-.mem.offset 8,0; st8.spill [r3]=r17,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r18,16; \
-.mem.offset 8,0; st8.spill [r3]=r19,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r20,16; \
-.mem.offset 8,0; st8.spill [r3]=r21,16; \
- mov r18=b6; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r22,16; \
-.mem.offset 8,0; st8.spill [r3]=r23,16; \
- mov r19=b7; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r24,16; \
-.mem.offset 8,0; st8.spill [r3]=r25,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r26,16; \
-.mem.offset 8,0; st8.spill [r3]=r27,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r28,16; \
-.mem.offset 8,0; st8.spill [r3]=r29,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r30,16; \
-.mem.offset 8,0; st8.spill [r3]=r31,32; \
- ;; \
- mov ar.fpsr=r11; \
- st8 [r2]=r8,8; \
- adds r24=PT(B6)-PT(F7),r3; \
- ;; \
- stf.spill [r2]=f6,32; \
- stf.spill [r3]=f7,32; \
- ;; \
- stf.spill [r2]=f8,32; \
- stf.spill [r3]=f9,32; \
- ;; \
- stf.spill [r2]=f10,32; \
- stf.spill [r3]=f11; \
- adds r25=PT(B7)-PT(F11),r3; \
- ;; \
- st8 [r24]=r18,16; /* b6 */ \
- st8 [r25]=r19,16; /* b7 */ \
- adds r3=PT(R5)-PT(F11),r3; \
- ;; \
- st8 [r24]=r9; /* ar.csd */ \
- st8 [r25]=r10; /* ar.ssd */ \
- ;; \
-(pUStk)mov r18=ar.unat; \
-(pUStk)adds r19=PT(EML_UNAT)-PT(R4),r2; \
- ;; \
-(pUStk)st8 [r19]=r18; /* eml_unat */
-
-#define VMX_SAVE_EXTRA \
-.mem.offset 0,0; st8.spill [r2]=r4,16; \
-.mem.offset 8,0; st8.spill [r3]=r5,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2]=r6,16; \
-.mem.offset 8,0; st8.spill [r3]=r7; \
- ;; \
- mov r26=ar.unat; \
- ;; \
- st8 [r2]=r26; /* eml_unat */
-
-#define VMX_SAVE_MIN_WITH_COVER VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,, P6_BR_VMX_PANIC)
-#define VMX_SAVE_MIN_WITH_COVER_NO_PANIC \
- VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,, )
-#define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, P6_BR_VMX_PANIC)
-#define VMX_SAVE_MIN VMX_DO_SAVE_MIN( , mov r30=r0,, P6_BR_VMX_PANIC)
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/ia64/vmx/vmx_phy_mode.c b/xen/arch/ia64/vmx/vmx_phy_mode.c
deleted file mode 100644
index 096c41bc20..0000000000
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_phy_mode.c: emulating domain physical mode.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Arun Sharma (arun.sharma@intel.com)
- * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
- * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- */
-
-
-#include <asm/processor.h>
-#include <asm/gcc_intrin.h>
-#include <asm/vmx_phy_mode.h>
-#include <asm/pgtable.h>
-#include <asm/vmmu.h>
-#include <asm/debugger.h>
-
-#define MODE_IND(psr) \
- (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
-
-#define SW_BAD 0   /* Bad mode transition */
-#define SW_2P_DT 1 /* Physical emulation is activated */
-#define SW_2P_D 2 /* Physical emulation is activated (only for data) */
-#define SW_2V 3 /* Exit physical mode emulation */
-#define SW_SELF 4 /* No mode transition */
-#define SW_NOP 5 /* Mode transition, but without action required */
-
-/*
- * Special notes:
- * - Index by it/dt/rt sequence
- * - Only existing mode transitions are allowed in this table
- * - If gva happens to be in rr0 or rr4, the only allowed case is identity
- * mapping (gva=gpa), or panic! (How?)
- */
-static const unsigned char mm_switch_table[8][8] = {
- /* 2004/09/12(Kevin): Allow switch to self */
- /*
- * (it,dt,rt): (0,0,0) -> (1,1,1)
- * This kind of transition usually occurs in the very early
- * stage of Linux boot up procedure. Another case is in efi
- * and pal calls. (see "arch/ia64/kernel/head.S")
- *
- * (it,dt,rt): (0,0,0) -> (0,1,1)
- * This kind of transition is found when OSYa exits efi boot
- * service. Since gva = gpa in this case (same region), data
- * accesses can be satisfied even though the itlb entry for
- * physical emulation is hit.
- *
- * (it,dt,rt): (0,0,0) -> (1,0,1)
- */
- {SW_SELF,0, 0, SW_NOP, 0, SW_2P_D, 0, SW_2V},
- {0, 0, 0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 0, 0, 0, 0},
- /*
- * (it,dt,rt): (0,1,1) -> (1,1,1)
- * This kind of transition is found in OSYa.
- *
- * (it,dt,rt): (0,1,1) -> (0,0,0)
- * This kind of transition is found in OSYa
- */
- {SW_NOP, 0, 0, SW_SELF,0, 0, 0, SW_2V},
- /* (1,0,0)->(1,1,1) */
- {0, 0, 0, 0, 0, 0, 0, SW_2V},
- /*
- * (it,dt,rt): (1,0,1) -> (1,1,1)
- * This kind of transition usually occurs when Linux returns
- * from the low level TLB miss handlers.
- * (see "arch/ia64/kernel/ivt.S")
- *
- * (it,dt,rt): (1,0,1) -> (0,0,0)
- */
- {SW_2P_DT, 0, 0, 0, 0, SW_SELF,0, SW_2V},
- {0, 0, 0, 0, 0, 0, 0, 0},
- /*
- * (it,dt,rt): (1,1,1) -> (1,0,1)
- * This kind of transition usually occurs in Linux low level
- * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
- *
- * (it,dt,rt): (1,1,1) -> (0,0,0)
- * This kind of transition usually occurs in pal and efi calls,
- * which requires running in physical mode.
- * (see "arch/ia64/kernel/head.S")
- *
- * (it,dt,rt): (1,1,1)->(1,0,0)
- */
- {SW_2P_DT, 0, 0, 0, SW_2P_D, SW_2P_D, 0, SW_SELF},
-};
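
For example, the Linux TLB-miss return transition documented above,
(it,dt,rt): (1,0,1) -> (1,1,1), resolves as follows; a worked
illustration only, using MODE_IND() and the table exactly as defined
here:

    int from = (1 << 2) + (0 << 1) + 1;   /* (1,0,1) -> index 5 */
    int to   = (1 << 2) + (1 << 1) + 1;   /* (1,1,1) -> index 7 */
    int act  = mm_switch_table[from][to]; /* SW_2V: exit phys emulation */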
-
-void
-physical_mode_init(VCPU *vcpu)
-{
- vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
-}
-
-void
-physical_tlb_miss(VCPU *vcpu, u64 vadr, int type)
-{
- u64 pte;
-
- pte = (vadr & _PAGE_PPN_MASK) | PHY_PAGE_WB;
- thash_vhpt_insert(vcpu, pte, (PAGE_SHIFT << 2), vadr, type);
-}
-
-void
-vmx_init_all_rr(VCPU *vcpu)
-{
- // enable vhpt in guest physical mode
- vcpu->arch.metaphysical_rid_dt |= 1;
-
- VMX(vcpu, vrr[VRN0]) = 0x38;
- vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
- VMX(vcpu, vrr[VRN1]) = 0x38;
- VMX(vcpu, vrr[VRN2]) = 0x38;
- VMX(vcpu, vrr[VRN3]) = 0x38;
- VMX(vcpu, vrr[VRN4]) = 0x38;
- vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
- VMX(vcpu, vrr[VRN5]) = 0x38;
- VMX(vcpu, vrr[VRN6]) = 0x38;
- VMX(vcpu, vrr[VRN7]) = 0x738;
-}
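
The magic 0x38 decodes through the region-register layout this code
relies on elsewhere (ve in bit 0, ps in bits 2..7, rid from bit 8); a
sketch under that assumption:

    ia64_rr rr = { .rrval = 0x38 };
    /* rr.ve == 0, rr.ps == 14 (16KB pages), rr.rid == 0; VRN7's 0x738
     * differs only in the rid field, which vrrtomrr() remaps before
     * vmx_switch_rr7_vcpu() loads it */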
-
-void
-vmx_load_all_rr(VCPU *vcpu)
-{
- unsigned long rr0, rr4;
-
- switch (vcpu->arch.arch_vmx.mmu_mode) {
- case VMX_MMU_VIRTUAL:
- rr0 = vcpu->arch.metaphysical_saved_rr0;
- rr4 = vcpu->arch.metaphysical_saved_rr4;
- break;
- case VMX_MMU_PHY_DT:
- rr0 = vcpu->arch.metaphysical_rid_dt;
- rr4 = vcpu->arch.metaphysical_rid_dt;
- break;
- case VMX_MMU_PHY_D:
- rr0 = vcpu->arch.metaphysical_rid_d;
- rr4 = vcpu->arch.metaphysical_rid_d;
- break;
- default:
- panic_domain(NULL, "bad mmu mode value");
- }
-
- ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
- ia64_dv_serialize_data();
- ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
- ia64_dv_serialize_data();
- ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN3 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
- ia64_dv_serialize_data();
- vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])));
- ia64_set_pta(VMX(vcpu, mpta));
- vmx_ia64_set_dcr(vcpu);
-
- ia64_srlz_d();
-}
-
-void
-switch_to_physical_rid(VCPU *vcpu)
-{
- u64 psr;
- u64 rr;
-
- switch (vcpu->arch.arch_vmx.mmu_mode) {
- case VMX_MMU_PHY_DT:
- rr = vcpu->arch.metaphysical_rid_dt;
- break;
- case VMX_MMU_PHY_D:
- rr = vcpu->arch.metaphysical_rid_d;
- break;
- default:
- panic_domain(NULL, "bad mmu mode value");
- }
-
- psr = ia64_clear_ic();
- ia64_set_rr(VRN0<<VRN_SHIFT, rr);
- ia64_dv_serialize_data();
- ia64_set_rr(VRN4<<VRN_SHIFT, rr);
- ia64_srlz_d();
-
- ia64_set_psr(psr);
- ia64_srlz_i();
- return;
-}
-
-void
-switch_to_virtual_rid(VCPU *vcpu)
-{
- u64 psr;
-
- psr = ia64_clear_ic();
- ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
- ia64_dv_serialize_data();
- ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
- ia64_srlz_d();
- ia64_set_psr(psr);
- ia64_srlz_i();
- return;
-}
-
-static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr)
-{
- return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
-}
-
-/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
- * so that no tlb miss is allowed.
- */
-void
-switch_mm_mode_fast(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
-{
- int act;
- act = mm_switch_action(old_psr, new_psr);
- switch (act) {
- case SW_2P_DT:
- vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
- switch_to_physical_rid(vcpu);
- break;
- case SW_2P_D:
- vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_D;
- switch_to_physical_rid(vcpu);
- break;
- case SW_2V:
- vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_VIRTUAL;
- switch_to_virtual_rid(vcpu);
- break;
- default:
- break;
- }
- return;
-}
-
-void
-switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
-{
- int act;
- /* Switch to physical mode when injecting PAL_INIT */
- if (unlikely(MODE_IND(new_psr) == 0 &&
- vcpu_regs(vcpu)->cr_iip == PAL_INIT_ENTRY))
- act = SW_2P_DT;
- else
- act = mm_switch_action(old_psr, new_psr);
- perfc_incra(vmx_switch_mm_mode, act);
- switch (act) {
- case SW_2P_DT:
- vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
- switch_to_physical_rid(vcpu);
- break;
- case SW_2P_D:
-// printk("V -> P_D mode transition: (0x%lx -> 0x%lx)\n",
-// old_psr.val, new_psr.val);
- vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_D;
- switch_to_physical_rid(vcpu);
- break;
- case SW_2V:
-// printk("P -> V mode transition: (0x%lx -> 0x%lx)\n",
-// old_psr.val, new_psr.val);
- vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_VIRTUAL;
- switch_to_virtual_rid(vcpu);
- break;
- case SW_SELF:
- printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
- old_psr.val);
- break;
- case SW_NOP:
-// printk("No action required for mode transition: (0x%lx -> 0x%lx)\n",
-// old_psr.val, new_psr.val);
- break;
- default:
- /* Sanity check */
- panic_domain(vcpu_regs(vcpu),
- "Unexpected virtual <--> physical mode transition, "
- "old:%lx, new:%lx\n", old_psr.val, new_psr.val);
- break;
- }
- return;
-}
-
-void
-check_mm_mode_switch (VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
-{
- if (old_psr.dt != new_psr.dt ||
- old_psr.it != new_psr.it ||
- old_psr.rt != new_psr.rt) {
- switch_mm_mode(vcpu, old_psr, new_psr);
- debugger_event(XEN_IA64_DEBUG_ON_MMU);
- }
-}
-
-
-/*
- * In physical mode, tc/tr insertions for regions 0 and 4 use
- * RID[0] and RID[4], which are reserved for physical mode
- * emulation. However, what those inserted tc/tr entries want
- * is the rid for virtual mode, so the original virtual rid
- * needs to be restored before the insert.
- *
- * Operations which require such a switch include:
- * - insertions (itc.*, itr.*)
- * - purges (ptc.* and ptr.*)
- * - tpa
- * - tak
- * - thash?, ttag?
- * All above needs actual virtual rid for destination entry.
- */
-
-void
-prepare_if_physical_mode(VCPU *vcpu)
-{
- if (!is_virtual_mode(vcpu))
- switch_to_virtual_rid(vcpu);
- return;
-}
-
-/* Recover always follows prepare */
-void
-recover_if_physical_mode(VCPU *vcpu)
-{
- if (!is_virtual_mode(vcpu))
- switch_to_physical_rid(vcpu);
- return;
-}
-
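The intended pairing then reads like this in an emulation path; a usage
sketch only, where vmx_emulate_tpa() is an invented name for any
itc/ptc/tpa-style handler body:

    prepare_if_physical_mode(vcpu);   /* restore virtual rids if needed */
    ret = vmx_emulate_tpa(vcpu, vadr);
    recover_if_physical_mode(vcpu);   /* back to physical-mode rids */
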
diff --git a/xen/arch/ia64/vmx/vmx_support.c b/xen/arch/ia64/vmx/vmx_support.c
deleted file mode 100644
index 98ce5c9511..0000000000
--- a/xen/arch/ia64/vmx/vmx_support.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_support.c: vmx specific support interface.
- * Copyright (c) 2005, Intel Corporation.
- * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <xen/hypercall.h>
-#include <xen/event.h>
-#include <public/sched.h>
-#include <public/hvm/ioreq.h>
-#include <asm/vmx.h>
-#include <asm/vmx_vcpu.h>
-
-/*
- * The only place to call vmx_io_assist() is mmio/legacy_io
- * emulation. Since I/O emulation is synchronous, it should not be
- * called anywhere else. This is not like x86, since IA-64
- * implements a per-vp stack without continuation.
- */
-void vmx_io_assist(struct vcpu *v)
-{
- ioreq_t *p = get_vio(v);
-
- if (p->state == STATE_IORESP_READY) {
- p->state = STATE_IOREQ_NONE;
- }
- else {
-        /* Can't block here, for the same reason as the other callers
-         * of vmx_wait_io. Simply returning is safe, since vmx_wait_io
-         * will try to block again.
- */
- return;
- }
-}
-
-void vmx_send_assist_req(struct vcpu *v)
-{
- ioreq_t *p = get_vio(v);
-
- if (unlikely(p->state != STATE_IOREQ_NONE)) {
- /* This indicates a bug in the device model. Crash the
- domain. */
- printk("Device model set bad IO state %d.\n", p->state);
- domain_crash(v->domain);
- return;
- }
- wmb();
- p->state = STATE_IOREQ_READY;
- notify_via_xen_event_channel(v->domain, v->arch.arch_vmx.xen_port);
-
- for (;;) {
- if (p->state != STATE_IOREQ_READY &&
- p->state != STATE_IOREQ_INPROCESS)
- break;
-
- set_bit(_VPF_blocked_in_xen, &current->pause_flags);
- mb(); /* set blocked status /then/ re-evaluate condition */
- if (p->state != STATE_IOREQ_READY &&
- p->state != STATE_IOREQ_INPROCESS)
- {
- clear_bit(_VPF_blocked_in_xen, &current->pause_flags);
- break;
- }
-
- raise_softirq(SCHEDULE_SOFTIRQ);
- do_softirq();
- mb();
- }
-
-    /* the code below this line is the completion phase... */
- vmx_io_assist(v);
-}
diff --git a/xen/arch/ia64/vmx/vmx_utility.c b/xen/arch/ia64/vmx/vmx_utility.c
deleted file mode 100644
index 6e00598c33..0000000000
--- a/xen/arch/ia64/vmx/vmx_utility.c
+++ /dev/null
@@ -1,674 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_utility.c:
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Shaofan Li (Susue Li) <susie.li@intel.com>
- * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- */
-
-#include <xen/types.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/processor.h>
-#include <asm/vmx_mm_def.h>
-
-#ifdef CHECK_FAULT
-/*
- * Return:
- *  0:  not a reserved indirect register
- *  1:  a reserved indirect register
- */
-int
-is_reserved_indirect_register (
- int type,
- int index )
-{
- switch (type) {
- case IA64_CPUID:
- if ( index >= 5 ) {
- return 1;
- }
-
- case IA64_DBR:
- case IA64_IBR:
- //bugbugbug:check with pal about the max ibr/dbr!!!!
- break;
-
- case IA64_PMC:
- //bugbugbug:check with pal about the max ibr/dbr!!!!
- break;
-
- case IA64_PMD:
- //bugbugbug:check with pal about the max ibr/dbr!!!!
- break;
-
- case IA64_PKR:
- //bugbugbug:check with pal about the max pkr!!!!
- break;
-
- case IA64_RR:
- //bugbugbug:check with pal about the max rr!!!!
- break;
-
- default:
- panic ("Unsupported instruction!");
- }
-
- return 0;
-
-}
-#endif
-
-/*
- * Return:
- * Set all ignored fields in value to 0 and return
- */
-u64
-indirect_reg_igfld_MASK (
- int type,
- int index,
- u64 value
- )
-{
- u64 nvalue;
-
- nvalue = value;
- switch ( type ) {
- case IA64_CPUID:
- if ( index == 2 ) {
- nvalue = 0;
- }
- break;
-
- case IA64_DBR:
- case IA64_IBR:
- /* Refer to SDM Vol2 Table 7-1,7-2 */
- if ( index % 2 != 0) {
- /* Ignore field: {61:60} */
- nvalue = value & (~MASK (60, 2));
- }
- break;
- case IA64_PMC:
- if ( index == 0 ) {
- /* Ignore field: 3:1 */
- nvalue = value & (~MASK (1, 3));
- }
- break;
- case IA64_PMD:
- if ( index >= 4 ) {
- /* Ignore field: 7:7 */
- /* bugbug: this code is correct for generic
- * PMD. However, for implementation specific
- * PMD, it's WRONG. need more info to judge
- * what's implementation specific PMD.
- */
- nvalue = value & (~MASK (7, 1));
- }
- break;
- case IA64_PKR:
- case IA64_RR:
- break;
- default:
- panic ("Unsupported instruction!");
- }
-
- return nvalue;
-}
-
-/*
- * Return:
- *  value with all ignored fields set to 0
- */
-u64
-cr_igfld_mask (int index, u64 value)
-{
- u64 nvalue;
-
- nvalue = value;
-
- switch ( index ) {
- case IA64_REG_CR_IVA:
-        /* Ignore field: 14:0 */
- nvalue = value & (~MASK (0, 15));
- break;
-
- case IA64_REG_CR_IHA:
-        /* Ignore field: 1:0 */
- nvalue = value & (~MASK (0, 2));
- break;
-
- case IA64_REG_CR_LID:
-        /* Ignore field: 63:32 */
- nvalue = value & (~MASK (32, 32));
- break;
-
- case IA64_REG_CR_TPR:
-        /* Ignore field: 63:17,3:0 */
- nvalue = value & (~MASK (17, 47));
- nvalue = nvalue & (~MASK (0, 4));
- break;
-
- case IA64_REG_CR_EOI:
-        /* Ignore field: 63:0 */
- nvalue = 0;
- break;
-
- case IA64_REG_CR_ITV:
- case IA64_REG_CR_PMV:
- case IA64_REG_CR_CMCV:
- case IA64_REG_CR_LRR0:
- case IA64_REG_CR_LRR1:
-        /* Ignore field: 63:17,12:12 */
- nvalue = value & (~MASK (17, 47));
- nvalue = nvalue & (~MASK (12, 1));
- break;
- }
-
- return nvalue;
-}
-
-
-/*
- * Return:
- * 1: PSR reserved fields are not zero
- * 0: PSR reserved fields are all zero
- */
-int
-check_psr_rsv_fields (u64 value)
-{
- /* PSR reserved fields: 0, 12~6, 16, 31~28, 63~46
- * These reserved fields shall all be zero
- * Otherwise we will panic
- */
-
- if ( value & MASK (0, 1) ||
- value & MASK (6, 7) ||
- value & MASK (16, 1) ||
- value & MASK (28, 4) ||
- value & MASK (46, 18)
- ) {
- return 1;
- }
-
- return 0;
-}
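
All of these checks lean on MASK(). Judging from the explicit
ITIR_RSV_MASK definition later in this file, MASK(offset, len) builds a
len-bit field mask at the given offset; a sketch under that assumption:

    #define MASK(offset, len) ((((1UL << (len)) - 1)) << (offset))
    /* e.g. MASK(6, 7) == 0x1fc0, rejecting psr bits 6..12 above, and
     * MASK(0, 2) | MASK(32, 32) reproduces ITIR_RSV_MASK exactly */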
-
-
-#ifdef CHECK_FAULT
-/*
- * Return:
- * 1: CR reserved fields are not zero
- * 0: CR reserved fields are all zero
- */
-int
-check_cr_rsv_fields (int index, u64 value)
-{
- switch (index) {
- case IA64_REG_CR_DCR:
- if ( (value & MASK ( 3, 5 )) ||
- (value & MASK (15, 49))) {
- return 1;
- }
- return 0;
-
- case IA64_REG_CR_ITM:
- case IA64_REG_CR_IVA:
- case IA64_REG_CR_IIP:
- case IA64_REG_CR_IFA:
- case IA64_REG_CR_IIPA:
- case IA64_REG_CR_IIM:
- case IA64_REG_CR_IHA:
- case IA64_REG_CR_EOI:
- return 0;
-
- case IA64_REG_CR_PTA:
- if ( (value & MASK ( 1, 1 )) ||
- (value & MASK (9, 6))) {
- return 1;
- }
- return 0;
-
- case IA64_REG_CR_IPSR:
- return check_psr_rsv_fields (value);
-
-
- case IA64_REG_CR_ISR:
- if ( (value & MASK ( 24, 8 )) ||
- (value & MASK (44, 20))) {
- return 1;
- }
- return 0;
-
- case IA64_REG_CR_ITIR:
- if ( (value & MASK ( 0, 2 )) ||
- (value & MASK (32, 32))) {
- return 1;
- }
- return 0;
-
- case IA64_REG_CR_IFS:
- if ( (value & MASK ( 38, 25 ))) {
- return 1;
- }
- return 0;
-
- case IA64_REG_CR_LID:
- if ( (value & MASK ( 0, 16 ))) {
- return 1;
- }
- return 0;
-
- case IA64_REG_CR_IVR:
- if ( (value & MASK ( 8, 56 ))) {
- return 1;
- }
- return 0;
-
- case IA64_REG_CR_TPR:
- if ( (value & MASK ( 8, 8 ))) {
- return 1;
- }
- return 0;
-
- case IA64_REG_CR_IRR0:
- if ( (value & MASK ( 1, 1 )) ||
- (value & MASK (3, 13))) {
- return 1;
- }
- return 0;
-
- case IA64_REG_CR_ITV:
- case IA64_REG_CR_PMV:
- case IA64_REG_CR_CMCV:
- if ( (value & MASK ( 8, 4 )) ||
- (value & MASK (13, 3))) {
- return 1;
- }
- return 0;
-
- case IA64_REG_CR_LRR0:
- case IA64_REG_CR_LRR1:
- if ( (value & MASK ( 11, 1 )) ||
- (value & MASK (14, 1))) {
- return 1;
- }
- return 0;
- }
- panic ("Unsupported CR");
- return 0;
-}
-#endif
-
-#if 0
-/*
- * Return:
- * 0: Indirect Reg reserved fields are not zero
- * 1: Indirect Reg reserved fields are all zero
- */
-int
-check_indirect_reg_rsv_fields ( int type, int index, u64 value )
-{
-
- switch ( type ) {
- case IA64_CPUID:
- if ( index == 3 ) {
- if ( value & MASK (40, 24 )) {
- return 0;
- }
- } else if ( index == 4 ) {
- if ( value & MASK (2, 62 )) {
- return 0;
- }
- }
- break;
-
- case IA64_DBR:
- case IA64_IBR:
- case IA64_PMC:
- case IA64_PMD:
- break;
-
- case IA64_PKR:
- if ( value & MASK (4, 4) ||
- value & MASK (32, 32 )) {
- return 0;
- }
- break;
-
- case IA64_RR:
- if ( value & MASK (1, 1) ||
- value & MASK (32, 32 )) {
- return 0;
- }
- break;
-
- default:
- panic ("Unsupported instruction!");
- }
-
- return 1;
-}
-#endif
-
-
-
-/* Return
- * Same format as isr_t
- * Only ei/ni bits are valid, all other bits are zero
- */
-u64
-set_isr_ei_ni (VCPU *vcpu)
-{
-
- IA64_PSR vpsr,ipsr;
- ISR visr;
- REGS *regs;
-
- regs=vcpu_regs(vcpu);
-
- visr.val = 0;
-
- vpsr.val = VCPU(vcpu, vpsr);
-
-    if (!vpsr.ic) {
- /* Set ISR.ni */
- visr.ni = 1;
- }
- ipsr.val = regs->cr_ipsr;
-
- visr.ei = ipsr.ri;
- return visr.val;
-}
-
-
-/* Set up ISR.na/code{3:0}/r/w for no-access instructions
- * Refer to SDM Vol Table 5-1
- * Parameter:
- * setr: if 1, indicates this function will set up ISR.r
- * setw: if 1, indicates this function will set up ISR.w
- * Return:
- * Same format as ISR. All fields are zero, except na/code{3:0}/r/w
- */
-u64
-set_isr_for_na_inst(VCPU *vcpu, int op)
-{
- ISR visr;
- visr.val = 0;
- switch (op) {
- case IA64_INST_TPA:
- visr.na = 1;
- visr.code = 0;
- break;
- case IA64_INST_TAK:
- visr.na = 1;
- visr.code = 3;
- break;
- }
- return visr.val;
-}
-
-
-
-/*
- * Set up ISR for register NaT consumption fault
- * Parameters:
- * read: if 1, indicates this is a read access;
- * write: if 1, indicates this is a write access;
- */
-void
-set_rnat_consumption_isr (VCPU *vcpu,int inst,int read,int write)
-{
- ISR visr;
- u64 value;
-    /* Need to set up ISR: code, ei, ni, na, r/w */
- visr.val = 0;
-
- /* ISR.code{7:4} =1,
- * Set up ISR.code{3:0}, ISR.na
- */
- visr.code = (1 << 4);
- if (inst) {
-
- value = set_isr_for_na_inst (vcpu,inst);
- visr.val = visr.val | value;
- }
-
- /* Set up ISR.r/w */
- visr.r = read;
- visr.w = write;
-
- /* Set up ei/ni */
- value = set_isr_ei_ni (vcpu);
- visr.val = visr.val | value;
-
- vcpu_set_isr (vcpu,visr.val);
-}
-
-
-
-/*
- * Set up ISR for break fault
- */
-void set_break_isr (VCPU *vcpu)
-{
- ISR visr;
- u64 value;
-
-    /* Need to set up ISR: ei, ni */
-
- visr.val = 0;
-
- /* Set up ei/ni */
- value = set_isr_ei_ni (vcpu);
- visr.val = visr.val | value;
-
- vcpu_set_isr(vcpu, visr.val);
-}
-
-
-
-
-
-
-/*
- * Set up ISR for Privileged Operation fault
- */
-void set_privileged_operation_isr (VCPU *vcpu,int inst)
-{
- ISR visr;
- u64 value;
-
-    /* Need to set up ISR: code, ei, ni, na */
-
- visr.val = 0;
-
- /* Set up na, code{3:0} for no-access instruction */
- value = set_isr_for_na_inst (vcpu, inst);
- visr.val = visr.val | value;
-
-
- /* ISR.code{7:4} =1 */
- visr.code = (1 << 4) | visr.code;
-
- /* Set up ei/ni */
- value = set_isr_ei_ni (vcpu);
- visr.val = visr.val | value;
-
- vcpu_set_isr (vcpu, visr.val);
-}
-
-
-
-
-/*
- * Set up ISR for Privileged Register fault
- */
-void set_privileged_reg_isr (VCPU *vcpu, int inst)
-{
- ISR visr;
- u64 value;
-
-    /* Need to set up ISR: code, ei, ni */
-
- visr.val = 0;
-
- /* ISR.code{7:4} =2 */
- visr.code = 2 << 4;
-
- /* Set up ei/ni */
- value = set_isr_ei_ni (vcpu);
- visr.val = visr.val | value;
-
- vcpu_set_isr (vcpu, visr.val);
-}
-
-
-
-
-
-/*
- * Set up ISR for Reserved Register/Field fault
- */
-void set_rsv_reg_field_isr (VCPU *vcpu)
-{
- ISR visr;
- u64 value;
-
-    /* Need to set up ISR: code, ei, ni */
-
- visr.val = 0;
-
-    /* ISR.code{7:4} = 3 */
- visr.code = (3 << 4) | visr.code;
-
- /* Set up ei/ni */
- value = set_isr_ei_ni (vcpu);
- visr.val = visr.val | value;
-
- vcpu_set_isr (vcpu, visr.val);
-}
-
-
-
-/*
- * Set up ISR for Illegal Operation fault
- */
-void set_illegal_op_isr (VCPU *vcpu)
-{
- ISR visr;
- u64 value;
-
- /* Need to set up ISR: ei, ni */
-
- visr.val = 0;
-
- /* Set up ei/ni */
- value = set_isr_ei_ni (vcpu);
- visr.val = visr.val | value;
-
- vcpu_set_isr (vcpu, visr.val);
-}
-
-
-void set_isr_reg_nat_consumption(VCPU *vcpu, u64 flag, u64 non_access)
-{
- ISR isr;
-
- isr.val = 0;
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_REG_NAT_CONSUMPTION_FAULT | flag;
- isr.na = non_access;
- isr.r = 1;
- isr.w = 0;
- vcpu_set_isr(vcpu, isr.val);
- return;
-}
-
-void set_isr_for_priv_fault(VCPU *vcpu, u64 non_access)
-{
- ISR isr;
-
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_PRIV_OP_FAULT;
- isr.na = non_access;
- vcpu_set_isr(vcpu, isr.val);
-
- return;
-}
-
-
-IA64FAULT check_target_register(VCPU *vcpu, u64 reg_index)
-{
- u64 sof;
- REGS *regs;
- regs=vcpu_regs(vcpu);
- sof = regs->cr_ifs & 0x7f;
- if(reg_index >= sof + 32)
- return IA64_FAULT;
- return IA64_NO_FAULT;
-}
-
-
-int is_reserved_rr_register(VCPU* vcpu, int reg_index)
-{
- return (reg_index >= 8);
-}
-
-#define ITIR_RSV_MASK (0x3UL | (((1UL<<32)-1) << 32))
-int is_reserved_itir_field(VCPU* vcpu, u64 itir)
-{
- if ( itir & ITIR_RSV_MASK ) {
- return 1;
- }
- return 0;
-}
-
-static int __is_reserved_rr_field(u64 reg_value)
-{
- ia64_rr rr = { .rrval = reg_value };
-
- if(rr.reserved0 != 0 || rr.reserved1 != 0){
- return 1;
- }
- if(rr.ps < 12 || rr.ps > 28){
- // page too big or small.
- return 1;
- }
- if(rr.ps > 15 && rr.ps % 2 != 0){
- // unsupported page size.
- return 1;
- }
- return 0;
-}
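The page-size rule enforced above is easy to misread, so here it is in isolation: ps must lie in [12, 28], and above 15 only even sizes are implemented. A hypothetical helper mirroring just the ps checks:

    static int rr_ps_is_valid(unsigned int ps)
    {
        if (ps < 12 || ps > 28)
            return 0;                 /* page too small or too big */
        if (ps > 15 && (ps % 2) != 0)
            return 0;                 /* odd sizes above 32KB unsupported */
        return 1;
    }

    /* rr_ps_is_valid(14) == 1 (16KB), rr_ps_is_valid(17) == 0,
     * rr_ps_is_valid(16) == 1 (64KB) */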
-
-int is_reserved_rr_rid(VCPU* vcpu, u64 reg_value)
-{
- ia64_rr rr = { .rrval = reg_value };
-
- if (rr.rid >= (1UL << vcpu->domain->arch.rid_bits))
- return 1;
-
- return 0;
-}
-
-int is_reserved_rr_field(VCPU* vcpu, u64 reg_value)
-{
- if (__is_reserved_rr_field(reg_value))
- return 1;
-
- return is_reserved_rr_rid(vcpu, reg_value);
-}
diff --git a/xen/arch/ia64/vmx/vmx_vcpu.c b/xen/arch/ia64/vmx/vmx_vcpu.c
deleted file mode 100644
index aaa513c5f9..0000000000
--- a/xen/arch/ia64/vmx/vmx_vcpu.c
+++ /dev/null
@@ -1,587 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_vcpu.c: handling all virtual CPU related things.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Fred Yang (fred.yang@intel.com)
- * Arun Sharma (arun.sharma@intel.com)
- * Shaofan Li (Susie Li) <susie.li@intel.com>
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- */
-#include <xen/sched.h>
-#include <public/xen.h>
-#include <asm/ia64_int.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/regionreg.h>
-#include <asm/tlb.h>
-#include <asm/processor.h>
-#include <asm/delay.h>
-#include <asm/regs.h>
-#include <asm/gcc_intrin.h>
-#include <asm/vmx_mm_def.h>
-#include <asm/vmx.h>
-#include <asm/vmx_phy_mode.h>
-#include <asm/debugger.h>
-
-/**************************************************************************
- VCPU general register access routines
-**************************************************************************/
-#include <asm/hw_irq.h>
-#include <asm/vmx_pal_vsa.h>
-#include <asm/kregs.h>
-#include <linux/efi.h>
-//unsigned long last_guest_rsm = 0x0;
-
-#ifdef VTI_DEBUG
-struct guest_psr_bundle{
- unsigned long ip;
- unsigned long psr;
-};
-
-struct guest_psr_bundle guest_psr_buf[100];
-unsigned long guest_psr_index = 0;
-#endif
-
-
-void
-vmx_ia64_set_dcr(VCPU *v)
-{
- /* xenoprof:
- * don't change psr.pp.
- * It is manipulated by xenoprof.
- */
- unsigned long dcr_bits = (IA64_DEFAULT_DCR_BITS & ~IA64_DCR_PP) |
- (ia64_getreg(_IA64_REG_CR_DCR) & IA64_DCR_PP);
-
- // if guest is running on cpl > 0, set dcr.dm=1
- // if guest is running on cpl = 0, set dcr.dm=0
- // because the guest OS may ld.s on a tr-mapped page.
- if (!(VCPU(v, vpsr) & IA64_PSR_CPL))
- dcr_bits &= ~IA64_DCR_DM;
-
- ia64_set_dcr(dcr_bits);
-}
-
-
-void
-vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
-{
-
- u64 mask;
- REGS *regs;
- IA64_PSR old_psr, new_psr;
- old_psr.val=VCPU(vcpu, vpsr);
-
- regs=vcpu_regs(vcpu);
- /* We only support guest as:
- * vpsr.pk = 0
- * vpsr.is = 0
- * Otherwise panic
- */
- if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) {
- panic_domain (regs,"Setting unsupport guest psr!");
- }
-
- /*
- * For those IA64_PSR bits: id/da/dd/ss/ed/ia
- * Since these bits become 0 after each instruction executes
- * successfully, we clear them here instead of tracking them.
- */
- VCPU(vcpu,vpsr) = value &
- (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
- IA64_PSR_ED | IA64_PSR_IA));
-
- if ( !old_psr.i && (value & IA64_PSR_I) ) {
- // vpsr.i 0->1
- vcpu->arch.irq_new_condition = 1;
- }
- new_psr.val=VCPU(vcpu, vpsr);
-#ifdef VTI_DEBUG
- guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
- guest_psr_buf[guest_psr_index].psr = new_psr.val;
- if (++guest_psr_index >= 100)
- guest_psr_index = 0;
-#endif
-#if 0
- if (old_psr.i != new_psr.i) {
- if (old_psr.i)
- last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
- else
- last_guest_rsm = 0;
- }
-#endif
-
- /*
- * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
- * except for the following bits:
- * ic/i/dt/si/rt/mc/it/bn/vm
- */
- mask = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
- IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
- IA64_PSR_VM;
-
- /* xenoprof:
- * don't change psr.pp.
- * It is manipulated by xenoprof.
- */
- mask |= IA64_PSR_PP;
-
- regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );
-
- if (FP_PSR(vcpu) & IA64_PSR_DFH)
- regs->cr_ipsr |= IA64_PSR_DFH;
-
- if (unlikely(vcpu->domain->debugger_attached)) {
- if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_SS)
- regs->cr_ipsr |= IA64_PSR_SS;
- if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_DB)
- regs->cr_ipsr |= IA64_PSR_DB;
- }
-
- check_mm_mode_switch(vcpu, old_psr, new_psr);
- return ;
-}
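The IPSR update above is a plain bit-select: bits named in mask are kept from the machine IPSR, everything else is taken from the guest value. A generic sketch (names illustrative):

    /* Keep the `mask` bits from hw, take the rest from guest. */
    static unsigned long merge_bits(unsigned long hw, unsigned long guest,
                                    unsigned long mask)
    {
        return (hw & mask) | (guest & ~mask);
    }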
-
-IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
-{
- REGS *regs = vcpu_regs(vcpu);
- IA64_PSR vpsr;
- vpsr.val = VCPU(vcpu, vpsr);
-
- if(!vpsr.ic)
- VCPU(vcpu,ifs) = regs->cr_ifs;
- regs->cr_ifs = IA64_IFS_V;
- return (IA64_NO_FAULT);
-}
-
-/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
- * so that no tlb miss is allowed.
- */
-void vmx_vcpu_set_rr_fast(VCPU *vcpu, u64 reg, u64 val)
-{
- u64 rrval;
-
- VMX(vcpu, vrr[reg >> VRN_SHIFT]) = val;
- switch((u64)(reg >> VRN_SHIFT)) {
- case VRN4:
- rrval = vrrtomrr(vcpu, val);
- vcpu->arch.metaphysical_saved_rr4 = rrval;
- if (is_virtual_mode(vcpu) && likely(vcpu == current))
- ia64_set_rr(reg, rrval);
- break;
- case VRN0:
- rrval = vrrtomrr(vcpu, val);
- vcpu->arch.metaphysical_saved_rr0 = rrval;
- if (is_virtual_mode(vcpu) && likely(vcpu == current))
- ia64_set_rr(reg, rrval);
- break;
- default:
- if (likely(vcpu == current))
- ia64_set_rr(reg, vrrtomrr(vcpu, val));
- break;
- }
-}
-
-void __vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid)
-{
- __vmx_switch_rr7(rid, (void *)v->arch.vhpt.hash, v->arch.privregs);
-}
-
-void vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid)
-{
- __get_cpu_var(inserted_vhpt) = (unsigned long)v->arch.vhpt.hash;
- __get_cpu_var(inserted_vpd) = (unsigned long)v->arch.privregs;
- __get_cpu_var(inserted_mapped_regs) = (unsigned long)v->arch.privregs;
- __vmx_switch_rr7_vcpu(v, rid);
-}
-
-IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
-{
- u64 rrval;
-
- if (unlikely(is_reserved_rr_rid(vcpu, val))) {
- gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
- return IA64_RSVDREG_FAULT;
- }
-
- VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
- switch((u64)(reg>>VRN_SHIFT)) {
- case VRN7:
- if (likely(vcpu == current))
- vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, val));
- break;
- case VRN4:
- rrval = vrrtomrr(vcpu,val);
- vcpu->arch.metaphysical_saved_rr4 = rrval;
- if (is_virtual_mode(vcpu) && likely(vcpu == current))
- ia64_set_rr(reg,rrval);
- break;
- case VRN0:
- rrval = vrrtomrr(vcpu,val);
- vcpu->arch.metaphysical_saved_rr0 = rrval;
- if (is_virtual_mode(vcpu) && likely(vcpu == current))
- ia64_set_rr(reg,rrval);
- break;
- default:
- if (likely(vcpu == current))
- ia64_set_rr(reg,vrrtomrr(vcpu,val));
- break;
- }
-
- return (IA64_NO_FAULT);
-}
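Both set_rr paths dispatch on the region number, i.e. the top three bits of the 64-bit virtual address. A sketch, assuming VRN_SHIFT is 61 as in the Xen ia64 headers:

    #define VRN_SHIFT 61

    static inline unsigned int vrn_of(unsigned long va)
    {
        return va >> VRN_SHIFT;   /* region number, 0..7 */
    }

    /* e.g. vrn_of(0xe000000000000000UL) == 7, i.e. VRN7 */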
-
-
-
-/**************************************************************************
- VCPU protection key register access routines
-**************************************************************************/
-
-u64 vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg)
-{
- return ((u64)ia64_get_pkr(reg));
-}
-
-IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
-{
- ia64_set_pkr(reg,val);
- return (IA64_NO_FAULT);
-}
-
-#if 0
-int tlb_debug=0;
-check_entry(u64 va, u64 ps, char *str)
-{
- va &= ~ (PSIZE(ps)-1);
- if ( va == 0x2000000002908000UL ||
- va == 0x600000000000C000UL ) {
- stop();
- }
- if (tlb_debug) printk("%s at %lx %lx\n", str, va, 1UL<<ps);
-}
-#endif
-
-
-u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
-{
- ia64_rr rr,rr1;
- vcpu_get_rr(vcpu,ifa,&rr.rrval);
- rr1.rrval=0;
- rr1.ps=rr.ps;
- rr1.rid=rr.rid;
- return (rr1.rrval);
-}
-
-/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
- * so that no tlb miss is allowed.
- */
-void vmx_vcpu_mov_to_psr_fast(VCPU *vcpu, u64 value)
-{
- /* TODO: Only allowed for current vcpu */
- u64 old_vpsr, new_vpsr, mipsr, mask;
- old_vpsr = VCPU(vcpu, vpsr);
-
- new_vpsr = (old_vpsr & 0xffffffff00000000) | (value & 0xffffffff);
- VCPU(vcpu, vpsr) = new_vpsr;
-
- mipsr = ia64_getreg(_IA64_REG_CR_IPSR);
-
- /* xenoprof:
- * don't change psr.pp.
- * It is manipulated by xenoprof.
- */
- mask = 0xffffffff00000000 | IA64_PSR_IC | IA64_PSR_I
- | IA64_PSR_DT | IA64_PSR_PP | IA64_PSR_SI | IA64_PSR_RT;
-
- mipsr = (mipsr & mask) | (value & (~mask));
-
- if (FP_PSR(vcpu) & IA64_PSR_DFH)
- mipsr |= IA64_PSR_DFH;
-
- ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
-
- switch_mm_mode_fast(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
-}
-
-#define IA64_PSR_MMU_VIRT (IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT)
-/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
- * so that no tlb miss is allowed.
- */
-void vmx_vcpu_rfi_fast(VCPU *vcpu)
-{
- /* TODO: Only allowed for current vcpu */
- u64 vifs, vipsr, vpsr, mipsr, mask;
- vipsr = VCPU(vcpu, ipsr);
- vpsr = VCPU(vcpu, vpsr);
- vifs = VCPU(vcpu, ifs);
- if (vipsr & IA64_PSR_BN) {
- if(!(vpsr & IA64_PSR_BN))
- vmx_asm_bsw1();
- } else if (vpsr & IA64_PSR_BN)
- vmx_asm_bsw0();
-
- /*
- * For those IA64_PSR bits: id/da/dd/ss/ed/ia
- * Since these bits become 0 after each instruction executes
- * successfully, we clear them here instead of tracking them.
- */
- VCPU(vcpu, vpsr) = vipsr & (~ (IA64_PSR_ID |IA64_PSR_DA
- | IA64_PSR_DD | IA64_PSR_ED | IA64_PSR_IA));
-
- /*
- * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
- * except for the following bits:
- * ic/i/dt/si/rt/mc/it/bn/vm
- */
- /* xenoprof */
- mask = (IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
- IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
- IA64_PSR_VM | IA64_PSR_PP);
- mipsr = ia64_getreg(_IA64_REG_CR_IPSR);
- mipsr = (mipsr & mask) | (vipsr & (~mask));
-
- if (FP_PSR(vcpu) & IA64_PSR_DFH)
- mipsr |= IA64_PSR_DFH;
-
- ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
- vmx_ia64_set_dcr(vcpu);
-
- if(vifs >> 63)
- ia64_setreg(_IA64_REG_CR_IFS, vifs);
-
- ia64_setreg(_IA64_REG_CR_IIP, VCPU(vcpu, iip));
-
- switch_mm_mode_fast(vcpu, (IA64_PSR)vpsr, (IA64_PSR)vipsr);
-}
-
-/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
- * so that no tlb miss is allowed.
- */
-void vmx_vcpu_ssm_fast(VCPU *vcpu, u64 imm24)
-{
- u64 old_vpsr, new_vpsr, mipsr;
-
- old_vpsr = VCPU(vcpu, vpsr);
- new_vpsr = old_vpsr | imm24;
-
- VCPU(vcpu, vpsr) = new_vpsr;
-
- mipsr = ia64_getreg(_IA64_REG_CR_IPSR);
- /* xenoprof:
- * don't change psr.pp.
- * It is manipulated by xenoprof.
- */
- mipsr |= imm24 & (~IA64_PSR_PP);
- ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
-
- switch_mm_mode_fast(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
-}
-
-/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
- * so that no tlb miss is allowed.
- */
-void vmx_vcpu_rsm_fast(VCPU *vcpu, u64 imm24)
-{
- u64 old_vpsr, new_vpsr, mipsr;
-
- old_vpsr = VCPU(vcpu, vpsr);
- new_vpsr = old_vpsr & ~imm24;
-
- VCPU(vcpu, vpsr) = new_vpsr;
-
- mipsr = ia64_getreg(_IA64_REG_CR_IPSR);
- /* xenoprof:
- * don't change psr.pp.
- * It is manipulated by xenoprof.
- */
- mipsr &= (~imm24) | IA64_PSR_PP;
- mipsr |= IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI;
-
- if (FP_PSR(vcpu) & IA64_PSR_DFH)
- mipsr |= IA64_PSR_DFH;
-
- ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
-
- switch_mm_mode_fast(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
-}
-
-IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
-{
- // TODO: Only allowed for current vcpu
- u64 ifs, psr;
- REGS *regs = vcpu_regs(vcpu);
- psr = VCPU(vcpu,ipsr);
- if (psr & IA64_PSR_BN)
- vcpu_bsw1(vcpu);
- else
- vcpu_bsw0(vcpu);
- vmx_vcpu_set_psr(vcpu,psr);
- vmx_ia64_set_dcr(vcpu);
- ifs=VCPU(vcpu,ifs);
- if(ifs>>63)
- regs->cr_ifs = ifs;
- regs->cr_iip = VCPU(vcpu,iip);
- return (IA64_NO_FAULT);
-}
-
-
-#if 0
-IA64FAULT
-vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, u64 *val)
-{
- IA64_PSR vpsr;
-
- vpsr.val = vmx_vcpu_get_psr(vcpu);
- if ( vpsr.bn ) {
- *val=VCPU(vcpu,vgr[reg-16]);
- // Check NAT bit
- if ( VCPU(vcpu,vnat) & (1UL<<(reg-16)) ) {
- // TODO
- //panic ("NAT consumption fault\n");
- return IA64_FAULT;
- }
-
- }
- else {
- *val=VCPU(vcpu,vbgr[reg-16]);
- if ( VCPU(vcpu,vbnat) & (1UL<<reg) ) {
- //panic ("NAT consumption fault\n");
- return IA64_FAULT;
- }
-
- }
- return IA64_NO_FAULT;
-}
-
-IA64FAULT
-vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat)
-{
- IA64_PSR vpsr;
- vpsr.val = vmx_vcpu_get_psr(vcpu);
- if ( vpsr.bn ) {
- VCPU(vcpu,vgr[reg-16]) = val;
- if(nat){
- VCPU(vcpu,vnat) |= ( 1UL<<(reg-16) );
- }else{
- VCPU(vcpu,vbnat) &= ~( 1UL<<(reg-16) );
- }
- }
- else {
- VCPU(vcpu,vbgr[reg-16]) = val;
- if(nat){
- VCPU(vcpu,vnat) |= ( 1UL<<(reg) );
- }else{
- VCPU(vcpu,vbnat) &= ~( 1UL<<(reg) );
- }
- }
- return IA64_NO_FAULT;
-}
-
-#endif
-#if 0
-IA64FAULT
-vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, u64 * val)
-{
- REGS *regs=vcpu_regs(vcpu);
- int nat;
- //TODO, Eddie
- if (!regs) return 0;
-#if 0
- if (reg >= 16 && reg < 32) {
- return vmx_vcpu_get_bgr(vcpu,reg,val);
- }
-#endif
- getreg(reg,val,&nat,regs); // FIXME: handle NATs later
- if(nat){
- return IA64_FAULT;
- }
- return IA64_NO_FAULT;
-}
-
-// returns:
-// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
-// IA64_NO_FAULT otherwise
-
-IA64FAULT
-vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
-{
- REGS *regs = vcpu_regs(vcpu);
- long sof = (regs->cr_ifs) & 0x7f;
- //TODO Eddie
-
- if (!regs) return IA64_ILLOP_FAULT;
- if (reg >= sof + 32) return IA64_ILLOP_FAULT;
-#if 0
- if ( reg >= 16 && reg < 32 ) {
- return vmx_vcpu_set_bgr(vcpu,reg, value, nat);
- }
-#endif
- setreg(reg,value,nat,regs);
- return IA64_NO_FAULT;
-}
-
-#endif
-
-/*
- * VPSR cannot track the guest PSR bits masked below;
- * this function reconstructs the full guest PSR.
- */
-
-u64 vmx_vcpu_get_psr(VCPU *vcpu)
-{
- u64 mask;
- REGS *regs = vcpu_regs(vcpu);
- mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
- IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
- return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
-}
-
-IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, u64 imm24)
-{
- u64 vpsr;
- vpsr = vmx_vcpu_get_psr(vcpu);
- vpsr &= (~imm24);
- vmx_vcpu_set_psr(vcpu, vpsr);
- return IA64_NO_FAULT;
-}
-
-
-IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, u64 imm24)
-{
- u64 vpsr;
- vpsr = vmx_vcpu_get_psr(vcpu);
- vpsr |= imm24;
- vmx_vcpu_set_psr(vcpu, vpsr);
- return IA64_NO_FAULT;
-}
-
-
-IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, u64 val)
-{
- val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
- vmx_vcpu_set_psr(vcpu, val);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT
-vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
-{
- VCPU(vcpu,tpr)=val;
- vcpu->arch.irq_new_condition = 1;
- return IA64_NO_FAULT;
-}
-
diff --git a/xen/arch/ia64/vmx/vmx_vcpu_save.c b/xen/arch/ia64/vmx/vmx_vcpu_save.c
deleted file mode 100644
index 50cfdacd94..0000000000
--- a/xen/arch/ia64/vmx/vmx_vcpu_save.c
+++ /dev/null
@@ -1,367 +0,0 @@
-/******************************************************************************
- * vmx_vcpu_save.c
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <asm/vmx_vcpu.h>
-#include <asm/vmx_vcpu_save.h>
-#include <asm/hvm/support.h>
-#include <public/hvm/save.h>
-
-void
-vmx_arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
-{
- vpd_t *vpd = (void *)v->arch.privregs;
- struct mapped_regs *vpd_low = &vpd->vpd_low;
- unsigned long nats;
- unsigned long bnats;
-
- union vcpu_ar_regs *ar = &c.nat->regs.ar;
- union vcpu_cr_regs *cr = &c.nat->regs.cr;
- int i;
-
- // banked registers
- if (vpd_low->vpsr & IA64_PSR_BN) {
- for (i = 0; i < 16; i++) {
- //c.nat->regs.r[i + 16] = vpd_low->vgr[i];
- c.nat->regs.bank[i] = vpd_low->vbgr[i];
- }
- nats = vpd_low->vnat;
- bnats = vpd_low->vbnat;
- } else {
- for (i = 0; i < 16; i++) {
- c.nat->regs.bank[i] = vpd_low->vgr[i];
- //c.nat->regs.r[i + 16] = vpd_low->vbgr[i];
- }
- bnats = vpd_low->vnat;
- nats = vpd_low->vbnat;
- }
- // c.nat->regs.nats[0:15] is already set. we shouldn't overwrite.
- c.nat->regs.nats =
- (c.nat->regs.nats & MASK(0, 16)) | (nats & MASK(16, 16));
- c.nat->regs.bnats = bnats & MASK(16, 16);
-
- //c.nat->regs.psr = vpd_low->vpsr;
- //c.nat->regs.pr = vpd_low->vpr;
-
- // ar
- ar->kr[0] = v->arch.arch_vmx.vkr[0];
- ar->kr[1] = v->arch.arch_vmx.vkr[1];
- ar->kr[2] = v->arch.arch_vmx.vkr[2];
- ar->kr[3] = v->arch.arch_vmx.vkr[3];
- ar->kr[4] = v->arch.arch_vmx.vkr[4];
- ar->kr[5] = v->arch.arch_vmx.vkr[5];
- ar->kr[6] = v->arch.arch_vmx.vkr[6];
- ar->kr[7] = v->arch.arch_vmx.vkr[7];
-#ifdef CONFIG_IA32_SUPPORT
- // csd and ssd are done by arch_get_info_guest()
- ar->fcr = v->arch._thread.fcr;
- ar->eflag = v->arch._thread.eflag;
- ar->cflg = v->arch._thread.cflg;
- ar->fsr = v->arch._thread.fsr;
- ar->fir = v->arch._thread.fir;
- ar->fdr = v->arch._thread.fdr;
-#endif
- //ar->itc = vpd_low->itc;//see vtime
-
- // cr
- //cr->dcr = vpd_low->dcr;
- //cr->itm = vpd_low->itm;
- //cr->iva = vpd_low->iva;
- //cr->pta = vpd_low->pta;
- //cr->ipsr = vpd_low->ipsr;
- //cr->isr = vpd_low->isr;
- //cr->iip = vpd_low->iip;
- //cr->ifa = vpd_low->ifa;
- //cr->itir = vpd_low->itir;
- cr->iipa = vpd_low->iipa;
- cr->ifs = vpd_low->ifs;
- //cr->iim = vpd_low->iim;
- //cr->iha = vpd_low->iha;
- cr->lid = vpd_low->lid;
- cr->ivr = vpd_low->ivr;
- //cr->tpr = vpd_low->tpr;
- cr->eoi = vpd_low->eoi;
- //cr->irr[0] = vpd_low->irr[0];
- //cr->irr[1] = vpd_low->irr[1];
- //cr->irr[2] = vpd_low->irr[2];
- //cr->irr[3] = vpd_low->irr[3];
- //cr->itv = vpd_low->itv;
- //cr->pmv = vpd_low->pmv;
- //cr->cmcv = vpd_low->cmcv;
- cr->lrr0 = vpd_low->lrr0;
- cr->lrr1 = vpd_low->lrr1;
-}
-
-int
-vmx_arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
-{
- vpd_t *vpd = (void *)v->arch.privregs;
- struct mapped_regs *vpd_low = &vpd->vpd_low;
- unsigned long vnat;
- unsigned long vbnat;
-
- union vcpu_ar_regs *ar = &c.nat->regs.ar;
- union vcpu_cr_regs *cr = &c.nat->regs.cr;
- int i;
-
- // banked registers
- if (c.nat->regs.psr & IA64_PSR_BN) {
- for (i = 0; i < 16; i++) {
- //vpd_low->vgr[i] = c.nat->regs.r[i + 16];
- vpd_low->vbgr[i] = c.nat->regs.bank[i];
- }
- vnat = c.nat->regs.nats;
- vbnat = c.nat->regs.bnats;
- } else {
- for (i = 0; i < 16; i++) {
- vpd_low->vgr[i] = c.nat->regs.bank[i];
- //vpd_low->vbgr[i] = c.nat->regs.r[i + 16];
- }
- vbnat = c.nat->regs.nats;
- vnat = c.nat->regs.bnats;
- }
- vpd_low->vnat = vnat & MASK(16, 16);
- vpd_low->vbnat = vbnat & MASK(16, 16);
- //vpd_low->vpsr = c.nat->regs.psr;
- //vpd_low->vpr = c.nat->regs.pr;
-
- // ar
- v->arch.arch_vmx.vkr[0] = ar->kr[0];
- v->arch.arch_vmx.vkr[1] = ar->kr[1];
- v->arch.arch_vmx.vkr[2] = ar->kr[2];
- v->arch.arch_vmx.vkr[3] = ar->kr[3];
- v->arch.arch_vmx.vkr[4] = ar->kr[4];
- v->arch.arch_vmx.vkr[5] = ar->kr[5];
- v->arch.arch_vmx.vkr[6] = ar->kr[6];
- v->arch.arch_vmx.vkr[7] = ar->kr[7];
-#ifdef CONFIG_IA32_SUPPORT
- v->arch._thread.fcr = ar->fcr;
- v->arch._thread.eflag = ar->eflag;
- v->arch._thread.cflg = ar->cflg;
- v->arch._thread.fsr = ar->fsr;
- v->arch._thread.fir = ar->fir;
- v->arch._thread.fdr = ar->fdr;
-#endif
- //vpd_low->itc = ar->itc;// see vtime.
-
- // cr
- vpd_low->dcr = cr->dcr;
- vpd_low->itm = cr->itm;
- //vpd_low->iva = cr->iva;
- vpd_low->pta = cr->pta;
- vpd_low->ipsr = cr->ipsr;
- vpd_low->isr = cr->isr;
- vpd_low->iip = cr->iip;
- vpd_low->ifa = cr->ifa;
- vpd_low->itir = cr->itir;
- vpd_low->iipa = cr->iipa;
- vpd_low->ifs = cr->ifs;
- vpd_low->iim = cr->iim;
- vpd_low->iha = cr->iha;
- vpd_low->lid = cr->lid;
- vpd_low->tpr = cr->tpr;
- vpd_low->ivr = cr->ivr; //XXX vlsapic
- vpd_low->eoi = cr->eoi;
- if (c.nat->flags & VGCF_SET_CR_IRR) {
- vpd_low->irr[0] = cr->irr[0];
- vpd_low->irr[1] = cr->irr[1];
- vpd_low->irr[2] = cr->irr[2];
- vpd_low->irr[3] = cr->irr[3];
- }
- vpd_low->itv = cr->itv;
- vpd_low->pmv = cr->pmv;
- vpd_low->cmcv = cr->cmcv;
- vpd_low->lrr0 = cr->lrr0;
- vpd_low->lrr1 = cr->lrr1;
-
- v->arch.irq_new_condition = 1;
- return 0;
-}
-
-
-static int vmx_cpu_save(struct domain *d, hvm_domain_context_t *h)
-{
- struct vcpu *v;
-
- for_each_vcpu(d, v) {
- struct pt_regs *regs = vcpu_regs(v);
- struct hvm_hw_ia64_cpu ia64_cpu;
-
- if (test_bit(_VPF_down, &v->pause_flags))
- continue;
-
- memset(&ia64_cpu, 0, sizeof(ia64_cpu));
-
- ia64_cpu.ipsr = regs->cr_ipsr;
-
- if (hvm_save_entry(CPU, v->vcpu_id, h, &ia64_cpu))
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vmx_cpu_load(struct domain *d, hvm_domain_context_t *h)
-{
- int rc = 0;
- uint16_t vcpuid;
- struct vcpu *v;
- struct hvm_hw_ia64_cpu ia64_cpu;
- struct pt_regs *regs;
-
- vcpuid = hvm_load_instance(h);
- if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
- gdprintk(XENLOG_ERR,
- "%s: domain has no vcpu %u\n", __func__, vcpuid);
- rc = -EINVAL;
- goto out;
- }
-
- if (hvm_load_entry(CPU, h, &ia64_cpu) != 0) {
- rc = -EINVAL;
- goto out;
- }
-
- regs = vcpu_regs(v);
- regs->cr_ipsr = ia64_cpu.ipsr | IA64_PSR_VM;
-
- out:
- return rc;
-}
-
-HVM_REGISTER_SAVE_RESTORE(CPU, vmx_cpu_save, vmx_cpu_load, 1, HVMSR_PER_VCPU);
-
-static int vmx_vpd_save(struct domain *d, hvm_domain_context_t *h)
-{
- struct vcpu *v;
-
- for_each_vcpu(d, v) {
- vpd_t *vpd = (void *)v->arch.privregs;
-
- if (test_bit(_VPF_down, &v->pause_flags))
- continue;
-
- // currently struct hvm_hw_ia64_vpd = struct vpd
- // if it is changed, this must be revised.
- if (hvm_save_entry(VPD, v->vcpu_id, h, (struct hvm_hw_ia64_vpd*)vpd))
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vmx_vpd_load(struct domain *d, hvm_domain_context_t *h)
-{
- int rc = 0;
- uint16_t vcpuid;
- struct vcpu *v;
- vpd_t *vpd;
- struct hvm_hw_ia64_vpd *ia64_vpd = NULL;
- int i;
-
- vcpuid = hvm_load_instance(h);
- if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
- gdprintk(XENLOG_ERR,
- "%s: domain has no vcpu %u\n", __func__, vcpuid);
- rc = -EINVAL;
- goto out;
- }
-
- ia64_vpd = xmalloc(struct hvm_hw_ia64_vpd);
- if (ia64_vpd == NULL) {
- gdprintk(XENLOG_ERR,
- "%s: can't allocate memory %d\n", __func__, vcpuid);
- rc = -ENOMEM;
- goto out;
- }
-
- if (hvm_load_entry(VPD, h, ia64_vpd) != 0) {
- rc = -EINVAL;
- goto out;
- }
-
- vpd = (void *)v->arch.privregs;
-#define VPD_COPY(x) vpd->vpd_low.x = ia64_vpd->vpd.vpd_low.x
-
- for (i = 0; i < 16; i++)
- VPD_COPY(vgr[i]);
- for (i = 0; i < 16; i++)
- VPD_COPY(vbgr[i]);
- VPD_COPY(vnat);
- VPD_COPY(vbnat);
- for (i = 0; i < 5; i++)
- VPD_COPY(vcpuid[i]);
- VPD_COPY(vpsr);
- VPD_COPY(vpr);
-
- // cr
-#if 0
- VPD_COPY(dcr);
- VPD_COPY(itm);
- VPD_COPY(iva);
- VPD_COPY(pta);
- VPD_COPY(ipsr);
- VPD_COPY(isr);
- VPD_COPY(iip);
- VPD_COPY(ifa);
- VPD_COPY(itir);
- VPD_COPY(iipa);
- VPD_COPY(ifs);
- VPD_COPY(iim);
- VPD_COPY(iha);
- VPD_COPY(lid);
- VPD_COPY(ivr);
- VPD_COPY(tpr);
- VPD_COPY(eoi);
- VPD_COPY(irr[0]);
- VPD_COPY(irr[1]);
- VPD_COPY(irr[2]);
- VPD_COPY(irr[3]);
- VPD_COPY(itv);
- VPD_COPY(pmv);
- VPD_COPY(cmcv);
- VPD_COPY(lrr0);
- VPD_COPY(lrr1);
-#else
- memcpy(&vpd->vpd_low.vcr[0], &ia64_vpd->vpd.vpd_low.vcr[0],
- sizeof(vpd->vpd_low.vcr));
-#endif
-#undef VPD_COPY
-
- v->arch.irq_new_condition = 1;
-
- out:
- if (ia64_vpd != NULL)
- xfree(ia64_vpd);
- return rc;
-}
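VPD_COPY above is pure textual substitution, which is why array elements can be passed as arguments. For reference:

    /* VPD_COPY(vpsr)   =>  vpd->vpd_low.vpsr   = ia64_vpd->vpd.vpd_low.vpsr;
     * VPD_COPY(irr[0]) =>  vpd->vpd_low.irr[0] = ia64_vpd->vpd.vpd_low.irr[0]; */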
-
-HVM_REGISTER_SAVE_RESTORE(VPD, vmx_vpd_save, vmx_vpd_load, 1, HVMSR_PER_VCPU);
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/ia64/vmx/vmx_virt.c b/xen/arch/ia64/vmx/vmx_virt.c
deleted file mode 100644
index bc119e4f91..0000000000
--- a/xen/arch/ia64/vmx/vmx_virt.c
+++ /dev/null
@@ -1,1636 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_virt.c:
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Fred Yang (fred.yang@intel.com)
- * Shaofan Li (Susie Li) <susie.li@intel.com>
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- */
-#include <asm/bundle.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/processor.h>
-#include <asm/delay.h> // Debug only
-#include <asm/vmmu.h>
-#include <asm/vmx_mm_def.h>
-#include <asm/smp.h>
-#include <asm/vmx.h>
-#include <asm/virt_event.h>
-#include <asm/vmx_phy_mode.h>
-#include <asm/debugger.h>
-
-#ifdef BYPASS_VMAL_OPCODE
-static void
-ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, u64 * cause)
-{
- *cause=0;
- switch (slot_type) {
- case M:
- if (inst.generic.major==0){
- if(inst.M28.x3==0){
- if(inst.M44.x4==6){
- *cause=EVENT_SSM;
- }else if(inst.M44.x4==7){
- *cause=EVENT_RSM;
- }else if(inst.M30.x4==8&&inst.M30.x2==2){
- *cause=EVENT_MOV_TO_AR_IMM;
- }
- }
- }
- else if(inst.generic.major==1){
- if(inst.M28.x3==0){
- if(inst.M32.x6==0x2c){
- *cause=EVENT_MOV_TO_CR;
- }else if(inst.M33.x6==0x24){
- *cause=EVENT_MOV_FROM_CR;
- }else if(inst.M35.x6==0x2d){
- *cause=EVENT_MOV_TO_PSR;
- }else if(inst.M36.x6==0x25){
- *cause=EVENT_MOV_FROM_PSR;
- }else if(inst.M29.x6==0x2A){
- *cause=EVENT_MOV_TO_AR;
- }else if(inst.M31.x6==0x22){
- *cause=EVENT_MOV_FROM_AR;
- }else if(inst.M45.x6==0x09){
- *cause=EVENT_PTC_L;
- }else if(inst.M45.x6==0x0A){
- *cause=EVENT_PTC_G;
- }else if(inst.M45.x6==0x0B){
- *cause=EVENT_PTC_GA;
- }else if(inst.M45.x6==0x0C){
- *cause=EVENT_PTR_D;
- }else if(inst.M45.x6==0x0D){
- *cause=EVENT_PTR_I;
- }else if(inst.M46.x6==0x1A){
- *cause=EVENT_THASH;
- }else if(inst.M46.x6==0x1B){
- *cause=EVENT_TTAG;
- }else if(inst.M46.x6==0x1E){
- *cause=EVENT_TPA;
- }else if(inst.M46.x6==0x1F){
- *cause=EVENT_TAK;
- }else if(inst.M47.x6==0x34){
- *cause=EVENT_PTC_E;
- }else if(inst.M41.x6==0x2E){
- *cause=EVENT_ITC_D;
- }else if(inst.M41.x6==0x2F){
- *cause=EVENT_ITC_I;
- }else if(inst.M42.x6==0x00){
- *cause=EVENT_MOV_TO_RR;
- }else if(inst.M42.x6==0x01){
- *cause=EVENT_MOV_TO_DBR;
- }else if(inst.M42.x6==0x02){
- *cause=EVENT_MOV_TO_IBR;
- }else if(inst.M42.x6==0x03){
- *cause=EVENT_MOV_TO_PKR;
- }else if(inst.M42.x6==0x04){
- *cause=EVENT_MOV_TO_PMC;
- }else if(inst.M42.x6==0x05){
- *cause=EVENT_MOV_TO_PMD;
- }else if(inst.M42.x6==0x0E){
- *cause=EVENT_ITR_D;
- }else if(inst.M42.x6==0x0F){
- *cause=EVENT_ITR_I;
- }else if(inst.M43.x6==0x10){
- *cause=EVENT_MOV_FROM_RR;
- }else if(inst.M43.x6==0x11){
- *cause=EVENT_MOV_FROM_DBR;
- }else if(inst.M43.x6==0x12){
- *cause=EVENT_MOV_FROM_IBR;
- }else if(inst.M43.x6==0x13){
- *cause=EVENT_MOV_FROM_PKR;
- }else if(inst.M43.x6==0x14){
- *cause=EVENT_MOV_FROM_PMC;
-/*
- }else if(inst.M43.x6==0x15){
- *cause=EVENT_MOV_FROM_PMD;
-*/
- }else if(inst.M43.x6==0x17){
- *cause=EVENT_MOV_FROM_CPUID;
- }
- }
- }
- break;
- case B:
- if(inst.generic.major==0){
- if(inst.B8.x6==0x02){
- *cause=EVENT_COVER;
- }else if(inst.B8.x6==0x08){
- *cause=EVENT_RFI;
- }else if(inst.B8.x6==0x0c){
- *cause=EVENT_BSW_0;
- }else if(inst.B8.x6==0x0d){
- *cause=EVENT_BSW_1;
- }
- }
- break;
- case I:
- case F:
- case L:
- case ILLEGAL:
- break;
- }
-}
-#endif
-
-static IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
-{
- u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
- return vmx_vcpu_reset_psr_sm(vcpu,imm24);
-}
-
-static IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
-{
- u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
- return vmx_vcpu_set_psr_sm(vcpu,imm24);
-}
-
-static IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
-{
- u64 tgt = inst.M33.r1;
- u64 val;
-
-/*
- if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, tgt, val);
- else return fault;
- */
- val = vmx_vcpu_get_psr(vcpu);
- val = (val & MASK(0, 32)) | (val & MASK(35, 2));
- return vcpu_set_gr(vcpu, tgt, val, 0);
-}
-
-/**
- * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
- */
-static IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
-{
- u64 val;
-
- if (vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
- panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
-
- return vmx_vcpu_set_psr_l(vcpu, val);
-}
-
-
-/**************************************************************************
-Privileged operation emulation routines
-**************************************************************************/
-
-static IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
-{
- IA64_PSR vpsr;
- REGS *regs;
-#ifdef CHECK_FAULT
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
-
- if (debugger_event(XEN_IA64_DEBUG_ON_RFI)) {
- raise_softirq(SCHEDULE_SOFTIRQ);
- do_softirq();
- }
-
- regs=vcpu_regs(vcpu);
- vpsr.val=regs->cr_ipsr;
- if ( vpsr.is == 1 ) {
- panic_domain(regs,"We do not support IA32 instruction yet");
- }
-
- return vmx_vcpu_rfi(vcpu);
-}
-
-static IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
-{
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- return vcpu_bsw0(vcpu);
-}
-
-static IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
-{
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- return vcpu_bsw1(vcpu);
-}
-
-static IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
-{
- return vmx_vcpu_cover(vcpu);
-}
-
-static IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
-{
- u64 r2,r3;
-#ifdef VMAL_NO_FAULT_CHECK
- IA64_PSR vpsr;
-
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
- if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
-#ifdef VMAL_NO_FAULT_CHECK
- ISR isr;
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif // VMAL_NO_FAULT_CHECK
- }
-#ifdef VMAL_NO_FAULT_CHECK
- if (unimplemented_gva(vcpu,r3) ) {
- unimpl_daddr(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
-
- debugger_event(XEN_IA64_DEBUG_ON_TC);
-
- return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
-}
-
-static IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
-{
- u64 r3;
-#ifdef VMAL_NO_FAULT_CHECK
- IA64_PSR vpsr;
-
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- ISR isr;
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
- if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
-#ifdef VMAL_NO_FAULT_CHECK
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif // VMAL_NO_FAULT_CHECK
- }
- return vmx_vcpu_ptc_e(vcpu,r3);
-}
-
-static IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
-{
- u64 r2,r3;
-#ifdef VMAL_NO_FAULT_CHECK
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
- if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
-#ifdef VMAL_NO_FAULT_CHECK
- ISR isr;
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif // VMAL_NO_FAULT_CHECK
- }
-#ifdef VMAL_NO_FAULT_CHECK
- if (unimplemented_gva(vcpu,r3) ) {
- unimpl_daddr(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
-
- debugger_event(XEN_IA64_DEBUG_ON_TC);
-
- return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
-}
-
-static IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
-{
- u64 r2,r3;
-#ifdef VMAL_NO_FAULT_CHECK
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
- if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
-#ifdef VMAL_NO_FAULT_CHECK
- ISR isr;
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif // VMAL_NO_FAULT_CHECK
- }
-#ifdef VMAL_NO_FAULT_CHECK
- if (unimplemented_gva(vcpu,r3) ) {
- unimpl_daddr(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
-
- debugger_event(XEN_IA64_DEBUG_ON_TC);
-
- return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
-}
-
-static IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
-{
- IA64FAULT ret1, ret2;
-
-#ifdef VMAL_NO_FAULT_CHECK
- ISR isr;
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
- ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
- ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
-#ifdef VMAL_NO_FAULT_CHECK
- if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
- }
- if (unimplemented_gva(vcpu, *pr3)) {
- unimpl_daddr(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
- return IA64_NO_FAULT;
-}
-
-static IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
-{
- u64 r2,r3;
- if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
- return IA64_FAULT;
-
- debugger_event(XEN_IA64_DEBUG_ON_TR);
-
- return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
-}
-
-static IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
-{
- u64 r2,r3;
- if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
- return IA64_FAULT;
-
- debugger_event(XEN_IA64_DEBUG_ON_TR);
-
- return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
-}
-
-
-static IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
-{
- u64 r1,r3;
-#ifdef CHECK_FAULT
- ISR visr;
- IA64_PSR vpsr;
- if(check_target_register(vcpu, inst.M46.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
-#ifdef CHECK_FAULT
- vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
- return IA64_NO_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- if(unimplemented_gva(vcpu, r3)){
- vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
- return IA64_NO_FAULT;
- }
-#endif //CHECK_FAULT
- r1 = vmx_vcpu_thash(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
- return(IA64_NO_FAULT);
-}
-
-
-static IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
-{
- u64 r1,r3;
-#ifdef CHECK_FAULT
- ISR visr;
- IA64_PSR vpsr;
-#endif
-#ifdef CHECK_FAULT
- if(check_target_register(vcpu, inst.M46.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
-#ifdef CHECK_FAULT
- vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
- return IA64_NO_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- if(unimplemented_gva(vcpu, r3)){
- vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
- return IA64_NO_FAULT;
- }
-#endif //CHECK_FAULT
- r1 = vmx_vcpu_ttag(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
- return(IA64_NO_FAULT);
-}
-
-
-static IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
-{
- u64 r1,r3;
-#ifdef CHECK_FAULT
- ISR visr;
- if(check_target_register(vcpu, inst.M46.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if(vpsr.cpl!=0){
- visr.val=0;
- vcpu_set_isr(vcpu, visr.val);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,1);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- if (unimplemented_gva(vcpu,r3) ) {
- // inject unimplemented_data_address_fault
- visr.val = set_isr_ei_ni(vcpu);
- visr.code = IA64_RESERVED_REG_FAULT;
- vcpu_set_isr(vcpu, visr.val);
- // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
- unimpl_daddr(vcpu);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
-
- if(vmx_vcpu_tpa(vcpu, r3, &r1)){
- return IA64_FAULT;
- }
- vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
- return(IA64_NO_FAULT);
-}
-
-static IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
-{
- u64 r1,r3;
-#ifdef CHECK_FAULT
- ISR visr;
- IA64_PSR vpsr;
- int fault=IA64_NO_FAULT;
- visr.val=0;
- if(check_target_register(vcpu, inst.M46.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if(vpsr.cpl!=0){
- vcpu_set_isr(vcpu, visr.val);
- return IA64_FAULT;
- }
-#endif
- if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,1);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif
- }
- r1 = vmx_vcpu_tak(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
- return(IA64_NO_FAULT);
-}
-
-
-/************************************
- * Insert translation register/cache
-************************************/
-
-static IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
-{
- u64 itir, ifa, pte, slot;
- ISR isr;
-
-#ifdef VMAL_NO_FAULT_CHECK
- IA64_PSR vpsr;
-
- vpsr.val = vmx_vcpu_get_psr(vcpu);
- if (vpsr.ic) {
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr(vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
- if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot)
- || vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {
-#ifdef VMAL_NO_FAULT_CHECK
- set_isr_reg_nat_consumption(vcpu, 0, 0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif // VMAL_NO_FAULT_CHECK
- }
-#ifdef VMAL_NO_FAULT_CHECK
- if (is_reserved_rr_register(vcpu, slot)) {
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
-
- if (vcpu_get_itir(vcpu ,&itir)) {
- return(IA64_FAULT);
- }
- if (vcpu_get_ifa(vcpu, &ifa)) {
- return(IA64_FAULT);
- }
-#ifdef VMAL_NO_FAULT_CHECK
- if (is_reserved_itir_field(vcpu, itir)) {
- // TODO
- return IA64_FAULT;
- }
- if (unimplemented_gva(vcpu, ifa)) {
- unimpl_daddr(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
-
- if (slot >= NDTRS) {
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_RESERVED_REG_FAULT;
- vcpu_set_isr(vcpu, isr.val);
- rsv_reg_field(vcpu);
- return IA64_FAULT;
- }
-
- debugger_event(XEN_IA64_DEBUG_ON_TR);
-
- return (vmx_vcpu_itr_d(vcpu, slot, pte, itir, ifa));
-}
-
-static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
-{
- u64 itir, ifa, pte, slot;
- ISR isr;
-#ifdef VMAL_NO_FAULT_CHECK
- IA64_PSR vpsr;
- vpsr.val = vmx_vcpu_get_psr(vcpu);
- if (vpsr.ic) {
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr(vcpu, 0);
- privilege_op(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
- if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot)
- || vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {
-#ifdef VMAL_NO_FAULT_CHECK
- set_isr_reg_nat_consumption(vcpu, 0, 0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif // VMAL_NO_FAULT_CHECK
- }
-#ifdef VMAL_NO_FAULT_CHECK
- if (is_reserved_rr_register(vcpu, slot)) {
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
-
- if (vcpu_get_itir(vcpu, &itir)) {
- return IA64_FAULT;
- }
- if (vcpu_get_ifa(vcpu, &ifa)) {
- return IA64_FAULT;
- }
-#ifdef VMAL_NO_FAULT_CHECK
- if (is_reserved_itir_field(vcpu, itir)) {
- // TODO
- return IA64_FAULT;
- }
- if (unimplemented_gva(vcpu, ifa)) {
- unimpl_daddr(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
-
- if (slot >= NITRS) {
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_RESERVED_REG_FAULT;
- vcpu_set_isr(vcpu, isr.val);
- rsv_reg_field(vcpu);
- return IA64_FAULT;
- }
-
- debugger_event(XEN_IA64_DEBUG_ON_TR);
-
- return vmx_vcpu_itr_i(vcpu, slot, pte, itir, ifa);
-}
-
-static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst,
- u64 *itir, u64 *ifa, u64 *pte)
-{
- IA64FAULT ret1;
-
-#ifdef VMAL_NO_FAULT_CHECK
- IA64_PSR vpsr;
- vpsr.val = vmx_vcpu_get_psr(vcpu);
- if (vpsr.ic) {
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
-
- u64 fault;
- ISR isr;
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr(vcpu, 0);
- privilege_op(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
- ret1 = vcpu_get_gr_nat(vcpu, inst.M45.r2,pte);
-#ifdef VMAL_NO_FAULT_CHECK
- if (ret1 != IA64_NO_FAULT) {
- set_isr_reg_nat_consumption(vcpu, 0, 0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
-
- if (vcpu_get_itir(vcpu, itir)) {
- return IA64_FAULT;
- }
- if (vcpu_get_ifa(vcpu, ifa)) {
- return IA64_FAULT;
- }
-#ifdef VMAL_NO_FAULT_CHECK
- if (unimplemented_gva(vcpu, *ifa)) {
- unimpl_daddr(vcpu);
- return IA64_FAULT;
- }
-#endif // VMAL_NO_FAULT_CHECK
- return IA64_NO_FAULT;
-}
-
-static IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
-{
- u64 itir, ifa, pte;
-
- if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
- return IA64_FAULT;
- }
-
- debugger_event(XEN_IA64_DEBUG_ON_TC);
-
- return vmx_vcpu_itc_d(vcpu, pte, itir, ifa);
-}
-
-static IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
-{
- u64 itir, ifa, pte;
-
- if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
- return IA64_FAULT;
- }
-
- debugger_event(XEN_IA64_DEBUG_ON_TC);
-
- return vmx_vcpu_itc_i(vcpu, pte, itir, ifa);
-}
-
-/*************************************
- * Moves to semi-privileged registers
-*************************************/
-
-static IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
-{
- // I27 and M30 are identical for these fields
- u64 imm;
-
- if(inst.M30.ar3!=44){
- panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
- }
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- if(inst.M30.s){
- imm = -inst.M30.imm;
- }else{
- imm = inst.M30.imm;
- }
- return (vmx_vcpu_set_itc(vcpu, imm));
-}
-
-static IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
-{
- // I26 and M29 are identical for these fields
- u64 r2;
- if(inst.M29.ar3!=44){
- panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
- }
- if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- return (vmx_vcpu_set_itc(vcpu, r2));
-}
-
-
-static IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
-{
- // I28 and M31 are identical for these fields
- u64 r1;
- if(inst.M31.ar3!=44){
- panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
- }
-#ifdef CHECK_FAULT
- if(check_target_register(vcpu,inst.M31.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.si&& vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- r1 = vmx_vcpu_get_itc(vcpu);
- vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
- return IA64_NO_FAULT;
-}
-
-
-/********************************
- * Moves to privileged registers
-********************************/
-
-static IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r2;
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
- return (vmx_vcpu_set_pkr(vcpu,r3,r2));
-}
-
-static IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r2;
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
- return (vmx_vcpu_set_rr(vcpu,r3,r2));
-}
-
-static IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r2;
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
- return (vmx_vcpu_set_dbr(vcpu,r3,r2));
-}
-
-static IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r2;
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
- return vmx_vcpu_set_ibr(vcpu,r3,r2);
-}
-
-static IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r2;
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
- return (vmx_vcpu_set_pmc(vcpu,r3,r2));
-}
-
-static IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r2;
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
- return (vmx_vcpu_set_pmd(vcpu,r3,r2));
-}
-
-
-/**********************************
- * Moves from privileged registers
- **********************************/
-
-static IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r1;
-#ifdef CHECK_FAULT
- if(check_target_register(vcpu, inst.M43.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-
-#endif //CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- if(is_reserved_rr_register(vcpu,r3>>VRN_SHIFT)){
- set_rsv_reg_field_isr(vcpu);
- rsv_reg_field(vcpu);
- }
-#endif //CHECK_FAULT
- vcpu_get_rr(vcpu,r3,&r1);
- return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
-}
-
-static IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r1;
-#ifdef CHECK_FAULT
- if(check_target_register(vcpu, inst.M43.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-
-#endif //CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- if(is_reserved_indirect_register(vcpu,r3)){
- set_rsv_reg_field_isr(vcpu);
- rsv_reg_field(vcpu);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
- r1 = vmx_vcpu_get_pkr(vcpu, r3);
- return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
-}
-
-static IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r1;
- IA64FAULT res;
-#ifdef CHECK_FAULT
- if(check_target_register(vcpu, inst.M43.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-
-#endif //CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- if(is_reserved_indirect_register(vcpu,r3)){
- set_rsv_reg_field_isr(vcpu);
- rsv_reg_field(vcpu);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
- res = vmx_vcpu_get_dbr(vcpu, r3, &r1);
- if (res != IA64_NO_FAULT)
- return res;
- return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
-}
-
-static IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r1;
- IA64FAULT res;
-#ifdef CHECK_FAULT
- if(check_target_register(vcpu, inst.M43.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-
-#endif //CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- if(is_reserved_indirect_register(vcpu,r3)){
- set_rsv_reg_field_isr(vcpu);
- rsv_reg_field(vcpu);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
- res = vmx_vcpu_get_ibr(vcpu, r3, &r1);
- if (res != IA64_NO_FAULT)
- return res;
- return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
-}
-
-static IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r1;
-#ifdef CHECK_FAULT
- if(check_target_register(vcpu, inst.M43.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if (vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-
-#endif //CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- if(is_reserved_indirect_register(vcpu,r3)){
- set_rsv_reg_field_isr(vcpu);
- rsv_reg_field(vcpu);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
- r1 = vmx_vcpu_get_pmc(vcpu, r3);
- return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
-}
-
-static IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
-{
- u64 r3,r1;
-#ifdef CHECK_FAULT
- if(check_target_register(vcpu, inst.M43.r1)){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- if(is_reserved_indirect_register(vcpu,r3)){
- set_rsv_reg_field_isr(vcpu);
- rsv_reg_field(vcpu);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
- r1 = vmx_vcpu_get_cpuid(vcpu, r3);
- return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
-}
-
-static IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
-{
- u64 r2;
- extern u64 cr_igfld_mask(int index, u64 value);
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if(is_reserved_cr(inst.M32.cr3)||(vpsr.ic&&is_interruption_control_cr(inst.M32.cr3))){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
- if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
-#ifdef CHECK_FAULT
- set_isr_reg_nat_consumption(vcpu,0,0);
- rnat_comsumption(vcpu);
- return IA64_FAULT;
-#endif //CHECK_FAULT
- }
-#ifdef CHECK_FAULT
- if ( check_cr_rsv_fields (inst.M32.cr3, r2)) {
- /* Inject Reserved Register/Field fault
- * into guest */
- set_rsv_reg_field_isr (vcpu,0);
- rsv_reg_field (vcpu);
- return IA64_FAULT;
- }
-#endif //CHECK_FAULT
- r2 = cr_igfld_mask(inst.M32.cr3,r2);
- switch (inst.M32.cr3) {
- case 0: return vcpu_set_dcr(vcpu,r2);
- case 1: return vmx_vcpu_set_itm(vcpu,r2);
- case 2: return vmx_vcpu_set_iva(vcpu,r2);
- case 8: return vmx_vcpu_set_pta(vcpu,r2);
- case 16:return vcpu_set_ipsr(vcpu,r2);
- case 17:return vcpu_set_isr(vcpu,r2);
- case 19:return vcpu_set_iip(vcpu,r2);
- case 20:return vcpu_set_ifa(vcpu,r2);
- case 21:return vcpu_set_itir(vcpu,r2);
- case 22:return vcpu_set_iipa(vcpu,r2);
- case 23:return vcpu_set_ifs(vcpu,r2);
- case 24:return vcpu_set_iim(vcpu,r2);
- case 25:return vcpu_set_iha(vcpu,r2);
- case 64:printk("SET LID to 0x%lx\n", r2);
- return IA64_NO_FAULT;
- case 65:return IA64_NO_FAULT;
- case 66:return vmx_vcpu_set_tpr(vcpu,r2);
- case 67:return vmx_vcpu_set_eoi(vcpu,r2);
- case 68:return IA64_NO_FAULT;
- case 69:return IA64_NO_FAULT;
- case 70:return IA64_NO_FAULT;
- case 71:return IA64_NO_FAULT;
- case 72:return vmx_vcpu_set_itv(vcpu,r2);
- case 73:return vmx_vcpu_set_pmv(vcpu,r2);
- case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
- case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
- case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
- default:VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
- return IA64_NO_FAULT;
- }
-}
-
-
-#define cr_get(cr) \
- ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
- vcpu_set_gr(vcpu, tgt, val,0):fault;
-
-//#define cr_get(cr) (vcpu_set_gr(vcpu, tgt, vcpu_get##cr(vcpu), 0)
-
-/*
-#define vmx_cr_get(cr) \
- ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
- vcpu_set_gr(vcpu, tgt, val,0):fault;
-*/
-
-#define vmx_cr_get(cr) (vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_##cr(vcpu), 0))
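-/*
- * For reference, a mechanical expansion of these helpers for a sample
- * register (not additional code; vcpu, tgt, val and fault are locals of
- * the caller below):
- *
- *   cr_get(dcr)     -> ((fault = vcpu_get_dcr(vcpu, &val)) == IA64_NO_FAULT)
- *                        ? vcpu_set_gr(vcpu, tgt, val, 0) : fault;
- *   vmx_cr_get(itm) -> (vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_itm(vcpu), 0))
- */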
-
-static IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
-{
- u64 tgt = inst.M33.r1;
- u64 val;
- IA64FAULT fault;
-#ifdef CHECK_FAULT
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- if(is_reserved_cr(inst.M33.cr3)||is_read_only_cr(inst.M33.cr3)||
- (vpsr.ic&&is_interruption_control_cr(inst.M33.cr3))){
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- return IA64_FAULT;
- }
- if ( vpsr.cpl != 0) {
- /* Inject Privileged Operation fault into guest */
- set_privileged_operation_isr (vcpu, 0);
- privilege_op (vcpu);
- return IA64_FAULT;
- }
-#endif // CHECK_FAULT
-
-// from_cr_cnt[inst.M33.cr3]++;
- switch (inst.M33.cr3) {
- case 0: return cr_get(dcr);
- case 1: return vmx_cr_get(itm);
- case 2: return vmx_cr_get(iva);
- case 8: return vmx_cr_get(pta);
- case 16:return cr_get(ipsr);
- case 17:return cr_get(isr);
- case 19:return cr_get(iip);
- case 20:return cr_get(ifa);
- case 21:return cr_get(itir);
- case 22:return cr_get(iipa);
- case 23:return cr_get(ifs);
- case 24:return cr_get(iim);
- case 25:return cr_get(iha);
- case 64:return vmx_cr_get(lid);
- case 65:
- val = vmx_vcpu_get_ivr(vcpu);
- return vcpu_set_gr(vcpu,tgt,val,0);
- case 66:return vmx_cr_get(tpr);
- case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
- case 68:return vmx_cr_get(irr0);
- case 69:return vmx_cr_get(irr1);
- case 70:return vmx_cr_get(irr2);
- case 71:return vmx_cr_get(irr3);
- case 72:return vmx_cr_get(itv);
- case 73:return vmx_cr_get(pmv);
- case 74:return vmx_cr_get(cmcv);
- case 80:return vmx_cr_get(lrr0);
- case 81:return vmx_cr_get(lrr1);
- default: return IA64_NO_FAULT;
- }
-}
-
-
-//#define BYPASS_VMAL_OPCODE
-extern IA64_SLOT_TYPE slot_types[0x20][3];
-unsigned long
-__vmx_get_domain_bundle(u64 iip, IA64_BUNDLE *pbundle)
-{
- return fetch_code(current, iip, pbundle);
-}
-
-/** Emulate a privileged operation.
- *
- * @param vcpu virtual cpu
- * @param cause the reason that caused the virtualization fault
- * @param opcode the instruction opcode that caused the virtualization fault
- */
-
-void
-vmx_emulate(VCPU *vcpu, REGS *regs)
-{
- IA64FAULT status;
- INST64 inst;
- u64 iip, cause, opcode;
- iip = regs->cr_iip;
- cause = VMX(vcpu,cause);
- opcode = VMX(vcpu,opcode);
-
-#ifdef VTLB_DEBUG
- check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
- dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
-#endif
-#if 0
-if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
- printk ("VMAL decode error: cause - %lx; op - %lx\n",
- cause, opcode );
- return;
-}
-#endif
-#ifdef BYPASS_VMAL_OPCODE
- // make a local copy of the bundle containing the privop
- IA64_BUNDLE bundle;
- int slot;
- IA64_SLOT_TYPE slot_type;
- IA64_PSR vpsr;
- __vmx_get_domain_bundle(iip, &bundle);
- slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
- if (!slot) inst.inst = bundle.slot0;
- else if (slot == 1)
- inst.inst = bundle.slot1a + (bundle.slot1b<<18);
- else if (slot == 2) inst.inst = bundle.slot2;
- else printk("priv_handle_op: illegal slot: %d\n", slot);
- slot_type = slot_types[bundle.template][slot];
- ia64_priv_decoder(slot_type, inst, &cause);
- if(cause==0){
- panic_domain(regs,"This instruction at 0x%lx slot %d can't be virtualized", iip, slot);
- }
-#else
- inst.inst=opcode;
-#endif /* BYPASS_VMAL_OPCODE */
-
- debugger_event(XEN_IA64_DEBUG_ON_PRIVOP);
-
- /*
- * Switch to actual virtual rid in rr0 and rr4,
- * which is required by some tlb related instructions.
- */
- prepare_if_physical_mode(vcpu);
-
- switch(cause) {
- case EVENT_RSM:
- perfc_incr(vmx_rsm);
- status=vmx_emul_rsm(vcpu, inst);
- break;
- case EVENT_SSM:
- perfc_incr(vmx_ssm);
- status=vmx_emul_ssm(vcpu, inst);
- break;
- case EVENT_MOV_TO_PSR:
- perfc_incr(vmx_mov_to_psr);
- status=vmx_emul_mov_to_psr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_PSR:
- perfc_incr(vmx_mov_from_psr);
- status=vmx_emul_mov_from_psr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_CR:
- perfc_incr(vmx_mov_from_cr);
- status=vmx_emul_mov_from_cr(vcpu, inst);
- break;
- case EVENT_MOV_TO_CR:
- perfc_incr(vmx_mov_to_cr);
- status=vmx_emul_mov_to_cr(vcpu, inst);
- break;
- case EVENT_BSW_0:
- perfc_incr(vmx_bsw0);
- status=vmx_emul_bsw0(vcpu, inst);
- break;
- case EVENT_BSW_1:
- perfc_incr(vmx_bsw1);
- status=vmx_emul_bsw1(vcpu, inst);
- break;
- case EVENT_COVER:
- perfc_incr(vmx_cover);
- status=vmx_emul_cover(vcpu, inst);
- break;
- case EVENT_RFI:
- perfc_incr(vmx_rfi);
- status=vmx_emul_rfi(vcpu, inst);
- break;
- case EVENT_ITR_D:
- perfc_incr(vmx_itr_d);
- status=vmx_emul_itr_d(vcpu, inst);
- break;
- case EVENT_ITR_I:
- perfc_incr(vmx_itr_i);
- status=vmx_emul_itr_i(vcpu, inst);
- break;
- case EVENT_PTR_D:
- perfc_incr(vmx_ptr_d);
- status=vmx_emul_ptr_d(vcpu, inst);
- break;
- case EVENT_PTR_I:
- perfc_incr(vmx_ptr_i);
- status=vmx_emul_ptr_i(vcpu, inst);
- break;
- case EVENT_ITC_D:
- perfc_incr(vmx_itc_d);
- status=vmx_emul_itc_d(vcpu, inst);
- break;
- case EVENT_ITC_I:
- perfc_incr(vmx_itc_i);
- status=vmx_emul_itc_i(vcpu, inst);
- break;
- case EVENT_PTC_L:
- perfc_incr(vmx_ptc_l);
- status=vmx_emul_ptc_l(vcpu, inst);
- break;
- case EVENT_PTC_G:
- perfc_incr(vmx_ptc_g);
- status=vmx_emul_ptc_g(vcpu, inst);
- break;
- case EVENT_PTC_GA:
- perfc_incr(vmx_ptc_ga);
- status=vmx_emul_ptc_ga(vcpu, inst);
- break;
- case EVENT_PTC_E:
- perfc_incr(vmx_ptc_e);
- status=vmx_emul_ptc_e(vcpu, inst);
- break;
- case EVENT_MOV_TO_RR:
- perfc_incr(vmx_mov_to_rr);
- status=vmx_emul_mov_to_rr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_RR:
- perfc_incr(vmx_mov_from_rr);
- status=vmx_emul_mov_from_rr(vcpu, inst);
- break;
- case EVENT_THASH:
- perfc_incr(vmx_thash);
- status=vmx_emul_thash(vcpu, inst);
- break;
- case EVENT_TTAG:
- perfc_incr(vmx_ttag);
- status=vmx_emul_ttag(vcpu, inst);
- break;
- case EVENT_TPA:
- perfc_incr(vmx_tpa);
- status=vmx_emul_tpa(vcpu, inst);
- break;
- case EVENT_TAK:
- perfc_incr(vmx_tak);
- status=vmx_emul_tak(vcpu, inst);
- break;
- case EVENT_MOV_TO_AR_IMM:
- perfc_incr(vmx_mov_to_ar_imm);
- status=vmx_emul_mov_to_ar_imm(vcpu, inst);
- break;
- case EVENT_MOV_TO_AR:
- perfc_incr(vmx_mov_to_ar_reg);
- status=vmx_emul_mov_to_ar_reg(vcpu, inst);
- break;
- case EVENT_MOV_FROM_AR:
- perfc_incr(vmx_mov_from_ar_reg);
- status=vmx_emul_mov_from_ar_reg(vcpu, inst);
- break;
- case EVENT_MOV_TO_DBR:
- perfc_incr(vmx_mov_to_dbr);
- status=vmx_emul_mov_to_dbr(vcpu, inst);
- break;
- case EVENT_MOV_TO_IBR:
- perfc_incr(vmx_mov_to_ibr);
- status=vmx_emul_mov_to_ibr(vcpu, inst);
- break;
- case EVENT_MOV_TO_PMC:
- perfc_incr(vmx_mov_to_pmc);
- status=vmx_emul_mov_to_pmc(vcpu, inst);
- break;
- case EVENT_MOV_TO_PMD:
- perfc_incr(vmx_mov_to_pmd);
- status=vmx_emul_mov_to_pmd(vcpu, inst);
- break;
- case EVENT_MOV_TO_PKR:
- perfc_incr(vmx_mov_to_pkr);
- status=vmx_emul_mov_to_pkr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_DBR:
- perfc_incr(vmx_mov_from_dbr);
- status=vmx_emul_mov_from_dbr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_IBR:
- perfc_incr(vmx_mov_from_ibr);
- status=vmx_emul_mov_from_ibr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_PMC:
- perfc_incr(vmx_mov_from_pmc);
- status=vmx_emul_mov_from_pmc(vcpu, inst);
- break;
- case EVENT_MOV_FROM_PKR:
- perfc_incr(vmx_mov_from_pkr);
- status=vmx_emul_mov_from_pkr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_CPUID:
- perfc_incr(vmx_mov_from_cpuid);
- status=vmx_emul_mov_from_cpuid(vcpu, inst);
- break;
- case EVENT_VMSW:
- printk ("Unimplemented instruction %ld\n", cause);
- status=IA64_FAULT;
- break;
- default:
- panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n",
- cause,regs->cr_iip,regs->cr_ipsr);
- break;
- };
-
-#if 0
- if (status != IA64_NO_FAULT)
- panic("Emulation failed with cause %d:\n", cause);
-#endif
-
- switch (status) {
- case IA64_RSVDREG_FAULT:
- set_rsv_reg_field_isr(vcpu);
- rsv_reg_field(vcpu);
- break;
- case IA64_ILLOP_FAULT:
- set_illegal_op_isr(vcpu);
- illegal_op(vcpu);
- break;
- case IA64_FAULT:
- /* Registers already set. */
- break;
- case IA64_NO_FAULT:
- if ( cause != EVENT_RFI )
- vcpu_increment_iip(vcpu);
- break;
- }
-
-
- recover_if_physical_mode(vcpu);
- return;
-
-}
-
diff --git a/xen/arch/ia64/vmx/vmx_vsa.S b/xen/arch/ia64/vmx/vmx_vsa.S
deleted file mode 100644
index 31e4ea86a7..0000000000
--- a/xen/arch/ia64/vmx/vmx_vsa.S
+++ /dev/null
@@ -1,84 +0,0 @@
-/* -*- Mode:ASM; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_vsa.S: Call PAL virtualization services.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Arun Sharma <arun.sharma@intel.com>
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- */
-
-#include <asm/asmmacro.h>
-
-
- .text
-
-/*
- * extern UINT64 ia64_call_vsa(UINT64 proc,UINT64 arg1, UINT64 arg2,
- * UINT64 arg3, UINT64 arg4, UINT64 arg5,
- * UINT64 arg6, UINT64 arg7);
- *
- * XXX: The currently defined services use at most 4 args. The
- * rest are not consumed.
- */
-GLOBAL_ENTRY(ia64_call_vsa)
- .regstk 4,4,0,0
-
-rpsave = loc0
-pfssave = loc1
-psrsave = loc2
-entry = loc3
-hostret = r24
-
- alloc pfssave=ar.pfs,4,4,0,0
- mov rpsave=rp
- movl entry=@gprel(__vsa_base)
-1: mov hostret=ip
- mov r25=in1 // copy arguments
- mov r26=in2
- mov r27=in3
- mov psrsave=psr
- ;;
- add entry=entry,gp
- tbit.nz p6,p0=psrsave,14 // IA64_PSR_I
- tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC
- ;;
- ld8 entry=[entry] // read entry point
- ;;
- add hostret=2f-1b,hostret // calculate return address
- add entry=entry,in0
- ;;
- rsm psr.i | psr.ic
- ;;
- srlz.d
- mov b6=entry
- br.cond.sptk b6 // call the service
-2:
- // Architectural sequence for enabling interrupts if necessary
-(p7) ssm psr.ic
- ;;
-(p7) srlz.d
- ;;
-(p6) ssm psr.i
- ;;
- mov rp=rpsave
- mov ar.pfs=pfssave
- mov r8=r31
- ;;
- srlz.d
- br.ret.sptk rp
-
-END(ia64_call_vsa)
-
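A minimal sketch of a C-level caller for the stub above, following the
prototype in its comment; the service offset and argument names are
illustrative assumptions, not taken from this file:

    /* proc is a hypothetical service offset into the VSA; only the
     * first four arguments are consumed by currently defined services. */
    UINT64 status = ia64_call_vsa(proc, arg1, arg2, arg3, 0, 0, 0, 0);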
diff --git a/xen/arch/ia64/vmx/vtlb.c b/xen/arch/ia64/vmx/vtlb.c
deleted file mode 100644
index 9f16e45914..0000000000
--- a/xen/arch/ia64/vmx/vtlb.c
+++ /dev/null
@@ -1,764 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vtlb.c: guest virtual tlb handling module.
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- * XiaoYan Feng (Fleming Feng) (Fleming.feng@intel.com)
- */
-
-#include <asm/vmx_vcpu.h>
-#include <asm/vmx_phy_mode.h>
-#include <asm/shadow.h>
-
-static u64 translate_phy_pte(VCPU *v, u64 pte, u64 itir, u64 va);
-static thash_data_t *__alloc_chain(thash_cb_t *);
-
-static inline void cch_mem_init(thash_cb_t *hcb)
-{
- hcb->cch_free_idx = 0;
- hcb->cch_freelist = NULL;
-}
-
-static thash_data_t *cch_alloc(thash_cb_t *hcb)
-{
- thash_data_t *p;
- if ( (p = hcb->cch_freelist) != NULL ) {
- hcb->cch_freelist = p->next;
- return p;
- }
- if (hcb->cch_free_idx < hcb->cch_sz/sizeof(thash_data_t)) {
- p = &((thash_data_t *)hcb->cch_buf)[hcb->cch_free_idx++];
- p->page_flags = 0;
- p->itir = 0;
- p->next = NULL;
- return p;
- }
- return NULL;
-}
-
-/*
- * Check to see if the address rid:va is translated by the TLB
- */
-
-static inline int __is_tr_translated(thash_data_t *trp, u64 rid, u64 va)
-{
- return (trp->p) && (trp->rid == rid) && ((va-trp->vadr) < PSIZE(trp->ps));
-}
-
-/*
- * Only for GUEST TR format.
- */
-static int
-__is_tr_overlap(thash_data_t *trp, u64 rid, u64 sva, u64 eva)
-{
- uint64_t sa1, ea1;
-
- if (!trp->p || trp->rid != rid ) {
- return 0;
- }
- sa1 = trp->vadr;
- ea1 = sa1 + PSIZE(trp->ps) - 1;
- eva -= 1;
- if (sva > ea1 || sa1 > eva)
- return 0;
- else
- return 1;
-
-}
-
-static thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
-{
-
- thash_data_t *trp;
- int i;
- u64 rid;
-
- vcpu_get_rr(vcpu, va, &rid);
- rid &= RR_RID_MASK;
- if (is_data) {
- if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
- trp = (thash_data_t *)vcpu->arch.dtrs;
- for (i = 0; i < NDTRS; i++, trp++) {
- if (__is_tr_translated(trp, rid, va)) {
- return trp;
- }
- }
- }
- }
- else {
- if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
- trp = (thash_data_t *)vcpu->arch.itrs;
- for (i = 0; i < NITRS; i++, trp++) {
- if (__is_tr_translated(trp, rid, va)) {
- return trp;
- }
- }
- }
- }
- return NULL;
-}
-
-static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash,
- thash_data_t *tail)
-{
- thash_data_t *head = hash->next;
-
- hash->next = 0;
- tail->next = hcb->cch_freelist;
- hcb->cch_freelist = head;
-}
-
-static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
-{
- u64 tag, len;
- ia64_rr rr;
- thash_data_t *head, *cch;
-
- pte &= ((~PAGE_FLAGS_RV_MASK)|_PAGE_VIRT_D);
- rr.rrval = ia64_get_rr(ifa);
- head = (thash_data_t *)ia64_thash(ifa);
- tag = ia64_ttag(ifa);
-
- if (!INVALID_VHPT(head)) {
- /* Find a free (ie invalid) entry. */
- len = 0;
- cch = head;
- do {
- ++len;
- if (cch->next == NULL) {
- if (len >= MAX_CCN_DEPTH) {
- thash_recycle_cch(hcb, head, cch);
- cch = cch_alloc(hcb);
- } else {
- cch = __alloc_chain(hcb);
- }
- cch->next = head->next;
- head->next = cch;
- break;
- }
- cch = cch->next;
- } while (!INVALID_VHPT(cch));
-
- /* As we insert in head, copy head. */
- local_irq_disable();
- cch->page_flags = head->page_flags;
- cch->itir = head->itir;
- cch->etag = head->etag;
- head->ti = 1;
- local_irq_enable();
- }
- /* here head is invalid. */
- wmb();
- head->page_flags=pte;
- head->itir = rr.ps << 2;
- *(volatile unsigned long*)&head->etag = tag;
- return;
-}
-
-void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va, int type)
-{
- u64 phy_pte, psr;
- ia64_rr mrr;
-
- phy_pte = translate_phy_pte(v, pte, itir, va);
- mrr.rrval = ia64_get_rr(va);
-
- if (itir_ps(itir) >= mrr.ps && VMX_MMU_MODE(v) != VMX_MMU_PHY_D) {
- vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
- } else {
- if (VMX_MMU_MODE(v) == VMX_MMU_PHY_D)
- itir = (itir & ~RR_PS_MASK) | (mrr.rrval & RR_PS_MASK);
- phy_pte &= ~PAGE_FLAGS_RV_MASK; /* Clear reserved fields. */
- psr = ia64_clear_ic();
- ia64_itc(type + 1, va, phy_pte, itir);
- ia64_set_psr(psr);
- ia64_srlz_i();
- }
-}
-
-/* On itr.d, old entries are not purged (optimization for Linux - see
- vmx_vcpu_itr_d). Fixup possible mismatch. */
-int vhpt_access_rights_fixup(VCPU *v, u64 ifa, int is_data)
-{
- thash_data_t *trp, *data;
- u64 ps, tag, mask;
-
- trp = __vtr_lookup(v, ifa, is_data);
- if (trp) {
- ps = _REGION_PAGE_SIZE(ia64_get_rr(ifa));
- if (trp->ps < ps)
- return 0;
- ifa = PAGEALIGN(ifa, ps);
- data = (thash_data_t *)ia64_thash(ifa);
- tag = ia64_ttag(ifa);
- do {
- if (data->etag == tag) {
- mask = trp->page_flags & PAGE_FLAGS_AR_PL_MASK;
- if (mask != (data->page_flags & PAGE_FLAGS_AR_PL_MASK)) {
- data->page_flags &= ~PAGE_FLAGS_AR_PL_MASK;
- data->page_flags |= mask;
- machine_tlb_purge(ifa, ps);
- return 1;
- }
- return 0;
- }
- data = data->next;
- } while(data);
- }
- return 0;
-}
-
-/*
- * vhpt lookup
- */
-
-thash_data_t * vhpt_lookup(u64 va)
-{
- thash_data_t *hash, *head;
- u64 tag, pte, itir;
-
- head = (thash_data_t *)ia64_thash(va);
- hash = head;
- tag = ia64_ttag(va);
- do {
- if (hash->etag == tag)
- break;
- hash = hash->next;
- } while(hash);
- if (hash && hash != head) {
- /* Put the entry on the front of the list (ie swap hash and head). */
- pte = hash->page_flags;
- hash->page_flags = head->page_flags;
- head->page_flags = pte;
-
- tag = hash->etag;
- hash->etag = head->etag;
- head->etag = tag;
-
- itir = hash->itir;
- hash->itir = head->itir;
- head->itir = itir;
-
- return head;
- }
- return hash;
-}
-
-u64 guest_vhpt_lookup(u64 iha, u64 *pte)
-{
- u64 ret, tmp;
- thash_data_t * data;
-
- /* Try to fill mTLB for the gVHPT entry. */
- data = vhpt_lookup(iha);
- if (data == NULL) {
- data = __vtr_lookup(current, iha, DSIDE_TLB);
- if (data != NULL)
- thash_vhpt_insert(current, data->page_flags, data->itir,
- iha, DSIDE_TLB);
- }
-
- asm volatile ("rsm psr.ic|psr.i;;"
- "srlz.d;;"
- "ld8.s %1=[%2];;" /* Read VHPT entry. */
- "tnat.nz p6,p7=%1;;" /* Success ? */
- "(p6) mov %0=1;" /* No -> ret = 1. */
- "(p6) mov %1=r0;"
- "(p7) extr.u %1=%1,0,53;;" /* Yes -> mask ig bits. */
- "(p7) mov %0=r0;" /* -> ret = 0. */
- "(p7) st8 [%3]=%1;;" /* -> save. */
- "ssm psr.ic;;"
- "srlz.d;;"
- "ssm psr.i;;"
- : "=r"(ret), "=r"(tmp)
- : "r"(iha), "r"(pte):"memory","p6","p7");
- return ret;
-}
-
-static thash_data_t * vtlb_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
-{
- u64 index, pfn, rid;
-
- pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
- rid = _REGION_ID(vrr);
- index = (pfn ^ rid) & ((1UL << (vpta.size - 5)) - 1);
- *tag = pfn ^ (rid << 39);
- return (thash_data_t *)((vpta.base << PTA_BASE_SHIFT) + (index << 5));
-}
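-/*
- * Worked example of the hash above (illustrative numbers): with
- * vpta.size = 15 the table holds 1UL << (15 - 5) = 1024 slots of
- * 32 bytes each.  For a 16KB-page region (ps = 14):
- *   pfn   = REGION_OFFSET(va) >> 14
- *   index = (pfn ^ rid) & 1023          -- slot within the table
- *   tag   = pfn ^ (rid << 39)           -- disambiguates collisions
- */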
-
-/*
- * purge software guest tlb
- */
-
-static void vtlb_purge(VCPU *v, u64 va, u64 ps)
-{
- thash_data_t *cur;
- u64 start, curadr, size, psbits, tag, rr_ps, num;
- ia64_rr vrr;
- thash_cb_t *hcb = &v->arch.vtlb;
-
- vcpu_get_rr(v, va, &vrr.rrval);
- psbits = VMX(v, psbits[(va >> 61)]);
- start = va & ~((1UL << ps) - 1);
- while (psbits) {
- curadr = start;
- rr_ps = __ffs(psbits);
- psbits &= ~(1UL << rr_ps);
- num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
- size = PSIZE(rr_ps);
- vrr.ps = rr_ps;
- while (num) {
- cur = vtlb_thash(hcb->pta, curadr, vrr.rrval, &tag);
- while (cur) {
- if (cur->etag == tag && cur->ps == rr_ps) {
- cur->etag = 1UL << 63;
- break;
- }
- cur = cur->next;
- }
- curadr += size;
- num--;
- }
- }
-}
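-/*
- * Example of the psbits walk above (illustrative): psbits records every
- * page size ever inserted for the region (set in vtlb_insert below).
- * If 16KB (ps=14) and 64KB (ps=16) entries were inserted and a 64KB
- * range is purged:
- *   rr_ps = 14: num = 1UL << (16 - 14) = 4 probes, 16KB apart
- *   rr_ps = 16: num = 1 probe covering the whole range
- */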
-
-
-/*
- * purge VHPT and machine TLB
- */
-static void vhpt_purge(VCPU *v, u64 va, u64 ps)
-{
- //thash_cb_t *hcb = &v->arch.vhpt;
- thash_data_t *cur;
- u64 start, size, tag, num;
- ia64_rr rr;
-
- start = va & ~((1UL << ps) - 1);
- rr.rrval = ia64_get_rr(va);
- size = PSIZE(rr.ps);
- num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
- while (num) {
- cur = (thash_data_t *)ia64_thash(start);
- tag = ia64_ttag(start);
- while (cur) {
- if (cur->etag == tag) {
- cur->etag = 1UL << 63;
- break;
- }
- cur = cur->next;
- }
- start += size;
- num--;
- }
- machine_tlb_purge(va, ps);
-}
-
-/*
- * Recycle all collision chains in VTLB or VHPT.
- */
-void thash_recycle_cch_all(thash_cb_t *hcb)
-{
- int num;
- thash_data_t *head;
-
- head = hcb->hash;
- num = (hcb->hash_sz/sizeof(thash_data_t));
- do {
- head->next = 0;
- head++;
- num--;
- } while(num);
- cch_mem_init(hcb);
-}
-
-
-static thash_data_t *__alloc_chain(thash_cb_t *hcb)
-{
- thash_data_t *cch;
-
- cch = cch_alloc(hcb);
- if (cch == NULL) {
- thash_recycle_cch_all(hcb);
- cch = cch_alloc(hcb);
- }
- return cch;
-}
-
-/*
- * Insert an entry into hash TLB or VHPT.
- * NOTES:
- * 1: When inserting a VHPT entry into the thash, "va" must be an
- * address covered by the inserted machine VHPT entry.
- * 2: Entries are always in TLB format.
- * 3: The caller must make sure the new entry does not overlap
- * with any existing entry.
- */
-static void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
-{
- thash_data_t *hash_table, *cch, *tail;
- /* int flag; */
- ia64_rr vrr;
- /* u64 gppn, ppns, ppne; */
- u64 tag, len;
- thash_cb_t *hcb = &v->arch.vtlb;
-
- vcpu_quick_region_set(PSCBX(v, tc_regions), va);
-
- vcpu_get_rr(v, va, &vrr.rrval);
- vrr.ps = itir_ps(itir);
- VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
- hash_table = vtlb_thash(hcb->pta, va, vrr.rrval, &tag);
- len = 0;
- cch = hash_table;
- do {
- if (INVALID_TLB(cch)) {
- cch->page_flags = pte;
- cch->itir = itir;
- cch->etag = tag;
- return;
- }
- ++len;
- tail = cch;
- cch = cch->next;
- } while(cch);
- if (len >= MAX_CCN_DEPTH) {
- thash_recycle_cch(hcb, hash_table, tail);
- cch = cch_alloc(hcb);
- }
- else {
- cch = __alloc_chain(hcb);
- }
- cch->page_flags = pte;
- cch->itir = itir;
- cch->etag = tag;
- cch->next = hash_table->next;
- wmb();
- hash_table->next = cch;
- return;
-}
-
-
-int vtr_find_overlap(VCPU *vcpu, u64 va, u64 ps, int is_data)
-{
- thash_data_t *trp;
- int i;
- u64 end, rid;
-
- vcpu_get_rr(vcpu, va, &rid);
- rid &= RR_RID_MASK;
- end = va + PSIZE(ps);
- if (is_data) {
- if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
- trp = (thash_data_t *)vcpu->arch.dtrs;
- for (i = 0; i < NDTRS; i++, trp++) {
- if (__is_tr_overlap(trp, rid, va, end )) {
- return i;
- }
- }
- }
- }
- else {
- if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
- trp = (thash_data_t *)vcpu->arch.itrs;
- for (i = 0; i < NITRS; i++, trp++) {
- if (__is_tr_overlap(trp, rid, va, end )) {
- return i;
- }
- }
- }
- }
- return -1;
-}
-
-/*
- * Purge entries in VTLB and VHPT
- */
-void thash_purge_entries(VCPU *v, u64 va, u64 ps)
-{
- if (vcpu_quick_region_check(v->arch.tc_regions, va))
- vtlb_purge(v, va, ps);
- vhpt_purge(v, va, ps);
-}
-
-void thash_purge_entries_remote(VCPU *v, u64 va, u64 ps)
-{
- u64 old_va = va;
- va = REGION_OFFSET(va);
- if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
- vtlb_purge(v, va, ps);
- vhpt_purge(v, va, ps);
-}
-
-static u64 translate_phy_pte(VCPU *v, u64 pte, u64 itir, u64 va)
-{
- u64 ps, ps_mask, paddr, maddr;
- union pte_flags phy_pte;
- struct domain *d = v->domain;
-
- ps = itir_ps(itir);
- ps_mask = ~((1UL << ps) - 1);
- phy_pte.val = pte;
- paddr = ((pte & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
- maddr = lookup_domain_mpa(d, paddr, NULL);
- if (maddr & _PAGE_IO)
- return -1;
-
- /* Ensure the WB attribute if the pte refers to a normal memory page;
- * this is required for vga acceleration since qemu maps the shared
- * vram buffer with WB.
- */
- if (mfn_valid(pte_pfn(__pte(maddr))) && phy_pte.ma != VA_MATTR_NATPAGE)
- phy_pte.ma = VA_MATTR_WB;
-
- maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
- phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
-
- /* If shadow mode is enabled, virtualize dirty bit. */
- if (shadow_mode_enabled(d) && phy_pte.d) {
- u64 gpfn = paddr >> PAGE_SHIFT;
- phy_pte.val |= _PAGE_VIRT_D;
-
- /* If the page is not already dirty, don't set the dirty bit! */
- if (gpfn < d->arch.shadow_bitmap_size * 8
- && !test_bit(gpfn, d->arch.shadow_bitmap))
- phy_pte.d = 0;
- }
-
- return phy_pte.val;
-}
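-/*
- * Worked example of the address math above (illustrative values): for a
- * 16KB mapping, ps = 14 and ps_mask = ~0x3fffUL, so with a guest page at
- * 0x08004000 and va low bits 0x1234:
- *   paddr = 0x08004000 | 0x1234 = 0x08005234
- *   maddr = lookup_domain_mpa(d, paddr)  -- guest-physical to machine;
- *           a _PAGE_IO result means "not RAM, emulate" and returns -1
- *   ppn   = machine frame of maddr recombined with paddr's sub-page bits
- */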
-
-
-/*
- * Purge overlapping TCs and then insert the new entry, to emulate itc ops.
- * Note: only TC entries can be purged and inserted this way.
- * Returns 1 if the address is MMIO.
- */
-int thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
-{
- u64 ps, phy_pte, psr;
- ia64_rr mrr;
-
- ps = itir_ps(itir);
- mrr.rrval = ia64_get_rr(ifa);
-
- phy_pte = translate_phy_pte(v, pte, itir, ifa);
-
- vtlb_purge(v, ifa, ps);
- vhpt_purge(v, ifa, ps);
-
- if (phy_pte == -1) {
- vtlb_insert(v, pte, itir, ifa);
- return 1;
- }
-
- if (ps != mrr.ps)
- vtlb_insert(v, pte, itir, ifa);
-
- if (ps >= mrr.ps) {
- vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
- } else { /* Subpaging */
- phy_pte &= ~PAGE_FLAGS_RV_MASK;
- psr = ia64_clear_ic();
- ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
- ia64_set_psr(psr);
- ia64_srlz_i();
- }
- return 0;
-}
-
-/*
- * Purge all TCs or VHPT entries, including those in the hash table.
- */
-
-//TODO: add sections.
-void thash_purge_all(VCPU *v)
-{
- int num;
- thash_data_t *head;
- thash_cb_t *vtlb,*vhpt;
- vtlb = &v->arch.vtlb;
- vhpt = &v->arch.vhpt;
-
- for (num = 0; num < 8; num++)
- VMX(v, psbits[num]) = 0;
-
- head = vtlb->hash;
- num = (vtlb->hash_sz/sizeof(thash_data_t));
- do{
- head->page_flags = 0;
- head->etag = 1UL<<63;
- head->itir = 0;
- head->next = 0;
- head++;
- num--;
- } while(num);
- cch_mem_init(vtlb);
-
- head = vhpt->hash;
- num = (vhpt->hash_sz/sizeof(thash_data_t));
- do{
- head->page_flags = 0;
- head->etag = 1UL<<63;
- head->next = 0;
- head++;
- num--;
- } while(num);
- cch_mem_init(vhpt);
- local_flush_tlb_all();
-}
-
-static void __thash_purge_all(void *arg)
-{
- struct vcpu *v = arg;
-
- BUG_ON(vcpu_runnable(v) || v->is_running);
- thash_purge_all(v);
-}
-
-void vmx_vcpu_flush_vtlb_all(VCPU *v)
-{
- if (v == current) {
- thash_purge_all(v);
- return;
- }
-
- /* SMP safe */
- vcpu_pause(v);
- if (v->processor == smp_processor_id())
- __thash_purge_all(v);
- else
- smp_call_function_single(v->processor, __thash_purge_all, v, 1);
- vcpu_unpause(v);
-}
-
-
-/*
- * Look up the hash table and its collision chain to find an entry
- * covering this address rid:va.
- *
- * INPUT:
- * entries are in TLB format for both VHPT & TLB.
- */
-
-thash_data_t *vtlb_lookup(VCPU *v, u64 va,int is_data)
-{
- thash_data_t *cch;
- u64 psbits, ps, tag;
- ia64_rr vrr;
- thash_cb_t *hcb = &v->arch.vtlb;
-
- cch = __vtr_lookup(v, va, is_data);
- if (cch)
- return cch;
-
- if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
- return NULL;
- psbits = VMX(v, psbits[(va >> 61)]);
- vcpu_get_rr(v, va, &vrr.rrval);
- while (psbits) {
- ps = __ffs(psbits);
- psbits &= ~(1UL << ps);
- vrr.ps = ps;
- cch = vtlb_thash(hcb->pta, va, vrr.rrval, &tag);
- do {
- if (cch->etag == tag && cch->ps == ps)
- goto found;
- cch = cch->next;
- } while(cch);
- }
- return NULL;
-found:
- if (unlikely(!cch->ed && is_data == ISIDE_TLB)) {
- /* This case is very rare, and it may lead to an incorrect setting
- of the itlb's ed bit! Purge it from the hash vTLB and let the
- guest OS determine the ed bit of the itlb entry. */
- vtlb_purge(v, va, ps);
- cch = NULL;
- }
- return cch;
-}
-
-
-/*
- * Initialize internal control data before service.
- */
-static void thash_init(thash_cb_t *hcb, u64 sz)
-{
- int num;
- thash_data_t *head;
-
- hcb->pta.val = (unsigned long)hcb->hash;
- hcb->pta.vf = 1;
- hcb->pta.ve = 1;
- hcb->pta.size = sz;
-
- head = hcb->hash;
- num = (hcb->hash_sz/sizeof(thash_data_t));
- do {
- head->page_flags = 0;
- head->itir = 0;
- head->etag = 1UL << 63;
- head->next = 0;
- head++;
- num--;
- } while(num);
-
- hcb->cch_free_idx = 0;
- hcb->cch_freelist = NULL;
-}
-
-int thash_alloc(thash_cb_t *hcb, u64 sz_log2, char *what)
-{
- struct page_info *page;
- void * vbase;
- u64 sz = 1UL << sz_log2;
-
- page = alloc_domheap_pages(NULL, (sz_log2 + 1 - PAGE_SHIFT), 0);
- if (page == NULL) {
- printk("No enough contiguous memory(%ldKB) for init_domain_%s\n",
- sz >> (10 - 1), what);
- return -ENOMEM;
- }
- vbase = page_to_virt(page);
- memset(vbase, 0, sz + sz); // hash + collisions chain
- if (sz_log2 >= 20 - 1)
- printk(XENLOG_DEBUG "Allocate domain %s at 0x%p(%ldMB)\n",
- what, vbase, sz >> (20 - 1));
- else
- printk(XENLOG_DEBUG "Allocate domain %s at 0x%p(%ldKB)\n",
- what, vbase, sz >> (10 - 1));
-
- hcb->hash = vbase;
- hcb->hash_sz = sz;
- hcb->cch_buf = (void *)((u64)vbase + hcb->hash_sz);
- hcb->cch_sz = sz;
- thash_init(hcb, sz_log2);
- return 0;
-}
-
-void thash_free(thash_cb_t *hcb)
-{
- struct page_info *page;
-
- if (hcb->hash) {
- page = virt_to_page(hcb->hash);
- free_domheap_pages(page, hcb->pta.size + 1 - PAGE_SHIFT);
- hcb->hash = 0;
- }
-}
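Taken together, thash_alloc/thash_free manage one contiguous allocation
split into two equal halves: the hash table proper and a collision-chain
pool behind it. A minimal usage sketch under that layout (the 64KB size
and the local variable are illustrative assumptions):

    thash_cb_t hcb;                           /* real ones live in v->arch */
    if (thash_alloc(&hcb, 16, "vtlb") == 0) { /* 2^16 = 64KB per half */
        /* hcb.hash: buckets, all marked invalid by thash_init();
         * hcb.cch_buf: chain entries handed out by cch_alloc() and
         * recycled in bulk by thash_recycle_cch_all(). */
        thash_free(&hcb);
    }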
diff --git a/xen/arch/ia64/xen/Makefile b/xen/arch/ia64/xen/Makefile
deleted file mode 100644
index cb7a5a3b8f..0000000000
--- a/xen/arch/ia64/xen/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-subdir-y += oprofile
-subdir-y += cpufreq
-
-obj-y += relocate_kernel.o
-obj-y += machine_kexec.o
-obj-y += crash.o
-obj-y += dom0_ops.o
-obj-y += domain.o
-obj-y += dom_fw_asm.o
-obj-y += dom_fw_common.o
-obj-y += dom_fw_dom0.o
-obj-y += dom_fw_domu.o
-obj-y += dom_fw_utils.o
-obj-y += dom_fw_sn2.o
-obj-y += fw_emul.o
-obj-y += hpsimserial.o
-obj-y += hypercall.o
-obj-y += platform_hypercall.o
-obj-y += hyperprivop.o
-obj-y += idle0_task.o
-obj-y += irq.o
-obj-y += ivt.o
-obj-y += mm.o
-obj-y += mm_init.o
-obj-y += pcdp.o
-obj-y += privop.o
-obj-y += faults.o
-obj-y += regionreg.o
-obj-y += sn_console.o
-obj-y += vcpu.o
-obj-y += vhpt.o
-obj-y += xenasm.o
-obj-y += xenmem.o
-obj-y += xenmisc.o
-obj-y += xensetup.o
-obj-y += xentime.o
-obj-y += flushd.o
-obj-y += privop_stat.o
-obj-y += xenpatch.o
-obj-y += pci.o
-
-obj-$(crash_debug) += gdbstub.o
-obj-$(xen_ia64_tlb_track) += tlb_track.o
-obj-$(xen_ia64_tlbflush_clock) += flushtlb.o
diff --git a/xen/arch/ia64/xen/cpufreq/Makefile b/xen/arch/ia64/xen/cpufreq/Makefile
deleted file mode 100644
index 9ffb2d4917..0000000000
--- a/xen/arch/ia64/xen/cpufreq/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += cpufreq.o
diff --git a/xen/arch/ia64/xen/cpufreq/cpufreq.c b/xen/arch/ia64/xen/cpufreq/cpufreq.c
deleted file mode 100644
index fac89013b6..0000000000
--- a/xen/arch/ia64/xen/cpufreq/cpufreq.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * arch/ia64/kernel/cpufreq/acpi-cpufreq.c
- * This file provides the ACPI-based P-state support. This
- * module works with the generic cpufreq infrastructure. Most of
- * the code is based on the i386 version
- * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
- *
- * Copyright (C) 2005 Intel Corp
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- *
- * Sep 2008 - Liu Jinsong <jinsong.liu@intel.com>
- * porting IPF acpi-cpufreq.c from Linux 2.6.23 to Xen hypervisor
- */
-
-#include <xen/types.h>
-#include <xen/errno.h>
-#include <xen/delay.h>
-#include <xen/cpumask.h>
-#include <xen/sched.h>
-#include <xen/timer.h>
-#include <xen/xmalloc.h>
-#include <asm/bug.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/percpu.h>
-#include <asm/pal.h>
-#include <acpi/acpi.h>
-#include <acpi/cpufreq/cpufreq.h>
-
-static struct acpi_cpufreq_data *drv_data[NR_CPUS];
-
-static struct cpufreq_driver acpi_cpufreq_driver;
-
-static int
-processor_get_pstate (u32 *value)
-{
- u64 pstate_index = 0;
- s64 retval;
-
- retval = ia64_pal_get_pstate(&pstate_index,
- PAL_GET_PSTATE_TYPE_INSTANT);
- *value = (u32) pstate_index;
-
- if (retval)
- printk("Failed to get current freq\n");
-
- return (int)retval;
-}
-
-static unsigned int
-extract_clock (unsigned value)
-{
- unsigned long i;
- unsigned int cpu;
- struct processor_performance *perf;
-
- cpu = smp_processor_id();
- perf = &processor_pminfo[cpu]->perf;
-
- for (i = 0; i < perf->state_count; i++) {
- if (value == perf->states[i].status)
- return perf->states[i].core_frequency;
- }
- return perf->states[i-1].core_frequency;
-}
-
-static void
-processor_get_freq (void *data)
-{
- unsigned int *freq = data;
- int ret = 0;
- u32 value = 0;
- unsigned int clock_freq;
-
- ret = processor_get_pstate(&value);
- if (ret) {
- *freq = 0;
- return;
- }
-
- clock_freq = extract_clock(value);
- *freq = (clock_freq*1000);
- return;
-}
-
-static unsigned int
-acpi_cpufreq_get (unsigned int cpu)
-{
- unsigned int freq;
-
- if (!cpu_online(cpu))
- return 0;
-
- if (cpu == smp_processor_id())
- processor_get_freq((void*)&freq);
- else
- smp_call_function_single(cpu, processor_get_freq, &freq, 1);
-
- return freq;
-}
-
-static void
-processor_set_pstate (void *data)
-{
- u32 *value = data;
- s64 retval;
-
- retval = ia64_pal_set_pstate((u64)*value);
-
- if (retval)
- *value = 1;
- else
- *value = 0;
-}
-
-static int
-processor_set_freq (struct acpi_cpufreq_data *data,
- struct cpufreq_policy *policy, int state)
-{
- u32 value = 0;
- unsigned int cpu = policy->cpu;
-
- if (!cpu_online(cpu))
- return -ENODEV;
-
- if (state == data->acpi_data->state) {
- if (unlikely(policy->resume)) {
- printk(KERN_INFO
- "Called after resume, resetting to P%d\n",
- state);
- policy->resume = 0;
- } else {
- printk(KERN_DEBUG"Already at target state (P%d)\n",
- state);
- return 0;
- }
- }
-
- value = (u32) data->acpi_data->states[state].control;
-
- if (cpu == smp_processor_id())
- processor_set_pstate((void *)&value);
- else
- smp_call_function_single(cpu, processor_set_pstate, &value, 1);
-
- if (value) {
- printk(KERN_WARNING "Transition failed\n");
- return -ENODEV;
- }
-
- cpufreq_statistic_update(cpu, data->acpi_data->state, state);
-
- data->acpi_data->state = state;
- policy->cur = data->freq_table[state].frequency;
-
- return 0;
-}
-
-static int
-acpi_cpufreq_target (struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
-{
- struct acpi_cpufreq_data *data = drv_data[policy->cpu];
- unsigned int next_state = 0;
- unsigned int result = 0;
-
- result = cpufreq_frequency_table_target(policy,
- data->freq_table, target_freq, relation, &next_state);
- if (result)
- return (result);
-
- result = processor_set_freq(data, policy, next_state);
-
- return (result);
-}
-
-static int
-acpi_cpufreq_verify (struct cpufreq_policy *policy)
-{
- struct acpi_cpufreq_data *data = drv_data[policy->cpu];
- struct processor_performance *perf =
- &processor_pminfo[policy->cpu]->perf;
-
- if (!policy || !data)
- return -EINVAL;
-
- cpufreq_verify_within_limits(policy, 0,
- perf->states[perf->platform_limit].core_frequency * 1000);
-
- return cpufreq_frequency_table_verify(policy, data->freq_table);
-}
-
-static int
-acpi_cpufreq_cpu_init (struct cpufreq_policy *policy)
-{
- unsigned int i;
- unsigned int cpu = policy->cpu;
- unsigned int result = 0;
- struct acpi_cpufreq_data *data;
-
- data = xmalloc(struct acpi_cpufreq_data);
- if (!data)
- return -ENOMEM;
- memset(data, 0, sizeof(struct acpi_cpufreq_data));
-
- drv_data[cpu] = data;
-
- data->acpi_data = &processor_pminfo[cpu]->perf;
-
- data->freq_table = xmalloc_array(struct cpufreq_frequency_table,
- (data->acpi_data->state_count + 1));
- if (!data->freq_table) {
- result = -ENOMEM;
- goto err_unreg;
- }
-
- /* detect transition latency */
- policy->cpuinfo.transition_latency = 0;
- for (i=0; i<data->acpi_data->state_count; i++) {
- if ((data->acpi_data->states[i].transition_latency * 1000) >
- policy->cpuinfo.transition_latency) {
- policy->cpuinfo.transition_latency =
- data->acpi_data->states[i].transition_latency * 1000;
- }
- }
-
- policy->governor = cpufreq_opt_governor ? : CPUFREQ_DEFAULT_GOVERNOR;
-
- policy->cur = acpi_cpufreq_get(policy->cpu);
- printk(KERN_INFO "Current freq of CPU %u is %u\n", cpu, policy->cur);
-
- /* table init */
- for (i = 0; i <= data->acpi_data->state_count; i++) {
- data->freq_table[i].index = i;
- if (i < data->acpi_data->state_count) {
- data->freq_table[i].frequency =
- data->acpi_data->states[i].core_frequency * 1000;
- } else {
- data->freq_table[i].frequency = CPUFREQ_TABLE_END;
- }
- }
-
- result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
- if (result)
- goto err_freqfree;
-
- data->acpi_data->state = 0;
- policy->resume = 1;
-
- return result;
-
-err_freqfree:
- xfree(data->freq_table);
-err_unreg:
- xfree(data);
- drv_data[cpu] = NULL;
-
- return result;
-}
-
-static int
-acpi_cpufreq_cpu_exit (struct cpufreq_policy *policy)
-{
- struct acpi_cpufreq_data *data = drv_data[policy->cpu];
-
- if (data) {
- drv_data[policy->cpu] = NULL;
- xfree(data->freq_table);
- xfree(data);
- }
-
- return 0;
-}
-
-static struct cpufreq_driver acpi_cpufreq_driver = {
- .name = "acpi-cpufreq",
- .verify = acpi_cpufreq_verify,
- .target = acpi_cpufreq_target,
- .get = acpi_cpufreq_get,
- .init = acpi_cpufreq_cpu_init,
- .exit = acpi_cpufreq_cpu_exit,
-};
-
-static int __init cpufreq_driver_init(void)
-{
- int ret = 0;
-
- if (cpufreq_controller == FREQCTL_xen)
- ret = cpufreq_register_driver(&acpi_cpufreq_driver);
-
- return ret;
-}
-
-__initcall(cpufreq_driver_init);
-
-int cpufreq_cpu_init(unsigned int cpuid)
-{
- return cpufreq_add_cpu(cpuid);
-}
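For orientation, the control flow the file above implements, as a sketch
(function names are from the file; the ordering is the assumed boot
sequence):

    /* 1. __initcall: cpufreq_driver_init() registers acpi_cpufreq_driver
     *    iff cpufreq_controller == FREQCTL_xen.
     * 2. CPU bring-up: cpufreq_cpu_init(cpu) -> cpufreq_add_cpu(cpu); the
     *    framework then calls acpi_cpufreq_cpu_init() to build the per-CPU
     *    P-state frequency table from the ACPI data.
     * 3. Governor requests: acpi_cpufreq_target() maps a target frequency
     *    to a table state and processor_set_freq() issues
     *    ia64_pal_set_pstate() on the target CPU (via IPI if remote). */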
diff --git a/xen/arch/ia64/xen/crash.c b/xen/arch/ia64/xen/crash.c
deleted file mode 100644
index e998442a5a..0000000000
--- a/xen/arch/ia64/xen/crash.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- * crash.c
- *
- * Based heavily on arch/ia64/kernel/crash.c from Linux 2.6.20-rc1
- *
- * Xen port written by:
- * - Simon 'Horms' Horman <horms@verge.net.au>
- * - Magnus Damm <magnus@valinux.co.jp>
- */
-
-#include <xen/types.h> /* Should be included by xen/kexec.h ? */
-#include <linux/thread_info.h> /* Should be included by linux/preempt.h ? */
-
-#include <xen/kexec.h>
-#include <linux/hardirq.h>
-#include <linux/smp.h>
-#include <asm/processor.h>
-#include <asm/kexec.h>
-#include <xen/sched.h>
-
-void machine_crash_shutdown(void)
-{
- crash_xen_info_t *info;
- unsigned long dom0_mm_pgd_mfn;
-
- if (in_interrupt())
- ia64_eoi();
- info = kexec_crash_save_info();
- /* Info is not word aligned on ia64 */
- dom0_mm_pgd_mfn = __pa(dom0->arch.mm.pgd) >> PAGE_SHIFT;
- memcpy((char *)info + offsetof(crash_xen_info_t, dom0_mm_pgd_mfn),
- &dom0_mm_pgd_mfn, sizeof(dom0_mm_pgd_mfn));
- kexec_disable_iosapic();
-#ifdef CONFIG_SMP
- smp_send_stop();
-#endif
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
-
diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c
deleted file mode 100644
index 57a1905f32..0000000000
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ /dev/null
@@ -1,878 +0,0 @@
-/******************************************************************************
- * Arch-specific dom0_ops.c
- *
- * Process command requests from domain-0 guest OS.
- *
- * Copyright (c) 2002, K A Fraser
- */
-
-#include <xen/config.h>
-#include <xen/types.h>
-#include <xen/lib.h>
-#include <xen/mm.h>
-#include <public/domctl.h>
-#include <public/sysctl.h>
-#include <xen/sched.h>
-#include <xen/event.h>
-#include <asm/pdb.h>
-#include <xen/trace.h>
-#include <xen/console.h>
-#include <xen/grant_table.h>
-#include <xen/guest_access.h>
-#include <xen/hypercall.h>
-#include <xen/pci.h>
-#include <asm/vmx.h>
-#include <asm/dom_fw.h>
-#include <asm/vhpt.h>
-#include <xen/iocap.h>
-#include <xen/errno.h>
-#include <xen/nodemask.h>
-#include <asm/dom_fw_utils.h>
-#include <asm/hvm/support.h>
-#include <xsm/xsm.h>
-#include <public/hvm/save.h>
-
-#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
-
-extern unsigned long total_pages;
-
-long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
-{
- long ret = 0;
-
- switch ( op->cmd )
- {
- case XEN_DOMCTL_getmemlist:
- {
- unsigned long i;
- struct domain *d = rcu_lock_domain_by_id(op->domain);
- unsigned long start_page = op->u.getmemlist.start_pfn;
- unsigned long nr_pages = op->u.getmemlist.max_pfns;
- uint64_t mfn;
-
- if ( d == NULL ) {
- ret = -EINVAL;
- break;
- }
-
- if ( !IS_PRIV_FOR(current->domain, d) ) {
- ret = -EPERM;
- rcu_unlock_domain(d);
- break;
- }
-
- for (i = 0 ; i < nr_pages ; i++) {
- pte_t *pte;
-
- pte = (pte_t *)lookup_noalloc_domain_pte(d,
- (start_page + i) << PAGE_SHIFT);
- if (pte && pte_present(*pte))
- mfn = start_page + i;
- else
- mfn = INVALID_MFN;
-
- if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
- ret = -EFAULT;
- break;
- }
- }
-
- op->u.getmemlist.num_pfns = i;
- if (copy_to_guest(u_domctl, op, 1))
- ret = -EFAULT;
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_arch_setup:
- {
- xen_domctl_arch_setup_t *ds = &op->u.arch_setup;
- struct domain *d = rcu_lock_domain_by_id(op->domain);
-
- if ( d == NULL) {
- ret = -EINVAL;
- break;
- }
-
- if ( !IS_PRIV_FOR(current->domain, d) ) {
- ret = -EPERM;
- rcu_unlock_domain(d);
- break;
- }
-
- if (ds->flags & XEN_DOMAINSETUP_query) {
- /* Set flags. */
- if (is_hvm_domain(d))
- ds->flags |= XEN_DOMAINSETUP_hvm_guest;
- /* Set params. */
- ds->bp = 0; /* unknown. */
- ds->maxmem = d->arch.convmem_end;
- ds->xsi_va = d->arch.shared_info_va;
- ds->hypercall_imm = d->arch.breakimm;
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
- ds->vhpt_size_log2 = d->arch.vhpt_size_log2;
-#endif
- /* Copy back. */
- if ( copy_to_guest(u_domctl, op, 1) )
- ret = -EFAULT;
- }
- else {
- if (is_hvm_domain(d)
- || (ds->flags & (XEN_DOMAINSETUP_hvm_guest
- | XEN_DOMAINSETUP_sioemu_guest))) {
- if (!vmx_enabled) {
- printk("No VMX hardware feature for vmx domain.\n");
- ret = -EINVAL;
- } else {
- d->is_hvm = 1;
- if (ds->flags & XEN_DOMAINSETUP_sioemu_guest)
- d->arch.is_sioemu = 1;
- xen_ia64_set_convmem_end(d, ds->maxmem);
- ret = vmx_setup_platform(d);
- }
- }
- else {
- if (ds->hypercall_imm) {
- /* dom_fw_setup() reads d->arch.breakimm */
- struct vcpu *v;
- d->arch.breakimm = ds->hypercall_imm;
- for_each_vcpu (d, v)
- v->arch.breakimm = d->arch.breakimm;
- }
- domain_set_vhpt_size(d, ds->vhpt_size_log2);
- if (ds->xsi_va)
- d->arch.shared_info_va = ds->xsi_va;
- ret = dom_fw_setup(d, ds->bp, ds->maxmem);
- }
- if (ret == 0) {
- /*
- * XXX IA64_SHARED_INFO_PADDR
- * assign these pages into the guest pseudo-physical address
- * space for dom0 to map this page by gmfn.
- * this is necessary for domain build, save, restore and
- * dump-core.
- */
- unsigned long i;
- for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
- assign_domain_page(d, IA64_SHARED_INFO_PADDR + i,
- virt_to_maddr(d->shared_info + i));
- }
- }
-
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_shadow_op:
- {
- struct domain *d;
- ret = -ESRCH;
- d = rcu_lock_domain_by_id(op->domain);
- if ( d != NULL )
- {
- if ( !IS_PRIV_FOR(current->domain, d) ) {
- ret = -EPERM;
- rcu_unlock_domain(d);
- break;
- }
-
- ret = shadow_mode_control(d, &op->u.shadow_op);
- rcu_unlock_domain(d);
- if (copy_to_guest(u_domctl, op, 1))
- ret = -EFAULT;
- }
- }
- break;
-
- case XEN_DOMCTL_ioport_permission:
- {
- struct domain *d;
- unsigned int fp = op->u.ioport_permission.first_port;
- unsigned int np = op->u.ioport_permission.nr_ports;
- unsigned int lp = fp + np - 1;
-
- ret = -ESRCH;
- d = rcu_lock_domain_by_id(op->domain);
- if (unlikely(d == NULL))
- break;
-
- if ( !IS_PRIV_FOR(current->domain, d) ) {
- ret = -EPERM;
- rcu_unlock_domain(d);
- break;
- }
-
- if (np == 0)
- ret = 0;
- else {
- if (op->u.ioport_permission.allow_access)
- ret = ioports_permit_access(d, fp, fp, lp);
- else
- ret = ioports_deny_access(d, fp, lp);
- }
-
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_sendtrigger:
- {
- struct domain *d;
- struct vcpu *v;
-
- ret = -ESRCH;
- d = rcu_lock_domain_by_id(op->domain);
- if ( d == NULL )
- break;
-
- ret = -EPERM;
- if ( !IS_PRIV_FOR(current->domain, d) ) {
- goto sendtrigger_out;
- }
-
- ret = -EINVAL;
- if ( op->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
- goto sendtrigger_out;
-
- ret = -ESRCH;
- if ( op->u.sendtrigger.vcpu >= d->max_vcpus ||
- (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
- goto sendtrigger_out;
-
- ret = 0;
- switch (op->u.sendtrigger.trigger)
- {
- case XEN_DOMCTL_SENDTRIGGER_INIT:
- {
- if (VMX_DOMAIN(v))
- vmx_pend_pal_init(d);
- else
- ret = -ENOSYS;
- }
- break;
-
- default:
- ret = -ENOSYS;
- }
-
- sendtrigger_out:
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_bind_pt_irq:
- {
- struct domain * d;
- xen_domctl_bind_pt_irq_t * bind;
-
- ret = -ESRCH;
- if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
- break;
- bind = &(op->u.bind_pt_irq);
- if ( iommu_enabled )
- ret = pt_irq_create_bind_vtd(d, bind);
- if ( ret < 0 )
- gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_unbind_pt_irq:
- {
- struct domain * d;
- xen_domctl_bind_pt_irq_t * bind;
-
- ret = -ESRCH;
- if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
- break;
- bind = &(op->u.bind_pt_irq);
- if ( iommu_enabled )
- ret = pt_irq_destroy_bind_vtd(d, bind);
- if ( ret < 0 )
- gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_memory_mapping:
- {
- struct domain *d;
- unsigned long gfn = op->u.memory_mapping.first_gfn;
- unsigned long mfn = op->u.memory_mapping.first_mfn;
- unsigned long nr_mfns = op->u.memory_mapping.nr_mfns;
- int i;
-
- ret = -EINVAL;
- if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
- break;
-
- ret = -ESRCH;
- if ( unlikely((d = rcu_lock_domain_by_id(op->domain)) == NULL) )
- break;
-
- ret=0;
- if ( op->u.memory_mapping.add_mapping )
- {
- gdprintk(XENLOG_INFO,
- "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
- gfn, mfn, nr_mfns);
-
- ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
- for ( i = 0; i < nr_mfns; i++ )
- assign_domain_mmio_page(d, (gfn+i)<<PAGE_SHIFT,
- (mfn+i)<<PAGE_SHIFT, PAGE_SIZE,
- ASSIGN_writable | ASSIGN_nocache);
- }
- else
- {
- gdprintk(XENLOG_INFO,
- "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
- gfn, mfn, nr_mfns);
-
- for ( i = 0; i < nr_mfns; i++ )
- deassign_domain_mmio_page(d, (gfn+i)<<PAGE_SHIFT,
- (mfn+i)<<PAGE_SHIFT, PAGE_SIZE);
- ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
- }
-
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_ioport_mapping:
- {
-
-#define MAX_IOPORTS 0x10000
- struct domain *d;
- unsigned int fgp = op->u.ioport_mapping.first_gport;
- unsigned int fmp = op->u.ioport_mapping.first_mport;
- unsigned int np = op->u.ioport_mapping.nr_ports;
-
- ret = -EINVAL;
- if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
- ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
- {
- gdprintk(XENLOG_ERR,
- "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
- fgp, fmp, np);
- break;
- }
-
- ret = -ESRCH;
- if ( unlikely((d = rcu_lock_domain_by_id(op->domain)) == NULL) )
- break;
-
- if ( op->u.ioport_mapping.add_mapping )
- {
- gdprintk(XENLOG_INFO,
- "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
- fgp, fmp, np);
-
- ret = ioports_permit_access(d, fgp, fmp, fmp + np - 1);
- }
- else
- {
- gdprintk(XENLOG_INFO,
- "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
- fgp, fmp, np);
-
- ret = ioports_deny_access(d, fgp, fgp + np - 1);
- }
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_sethvmcontext:
- {
- struct hvm_domain_context c;
- struct domain *d;
-
- c.cur = 0;
- c.size = op->u.hvmcontext.size;
- c.data = NULL;
-
- ret = -ESRCH;
- d = rcu_lock_domain_by_id(op->domain);
- if (d == NULL)
- break;
-
- ret = -EPERM;
- if ( !IS_PRIV_FOR(current->domain, d) )
- goto sethvmcontext_out;
-
-#ifdef CONFIG_X86
- ret = xsm_hvmcontext(d, op->cmd);
- if (ret)
- goto sethvmcontext_out;
-#endif /* CONFIG_X86 */
-
- ret = -EINVAL;
- if (!is_hvm_domain(d))
- goto sethvmcontext_out;
-
- ret = -ENOMEM;
- c.data = xmalloc_bytes(c.size);
- if (c.data == NULL)
- goto sethvmcontext_out;
-
- ret = -EFAULT;
- if (copy_from_guest(c.data, op->u.hvmcontext.buffer, c.size) != 0)
- goto sethvmcontext_out;
-
- domain_pause(d);
- ret = hvm_load(d, &c);
- domain_unpause(d);
-
- sethvmcontext_out:
- if (c.data != NULL)
- xfree(c.data);
-
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_gethvmcontext:
- {
- struct hvm_domain_context c;
- struct domain *d;
-
- ret = -ESRCH;
- d = rcu_lock_domain_by_id(op->domain);
- if (d == NULL)
- break;
-
- ret = -EPERM;
- if ( !IS_PRIV_FOR(current->domain, d) )
- goto gethvmcontext_out;
-
-#ifdef CONFIG_X86
- ret = xsm_hvmcontext(d, op->cmd);
- if (ret)
- goto gethvmcontext_out;
-#endif /* CONFIG_X86 */
-
- ret = -EINVAL;
- if (!is_hvm_domain(d))
- goto gethvmcontext_out;
-
- c.cur = 0;
- c.size = hvm_save_size(d);
- c.data = NULL;
-
- if (guest_handle_is_null(op->u.hvmcontext.buffer)) {
- /* Client is querying for the correct buffer size */
- op->u.hvmcontext.size = c.size;
- ret = 0;
- goto gethvmcontext_out;
- }
-
- /* Check that the client has a big enough buffer */
- ret = -ENOSPC;
- if (op->u.hvmcontext.size < c.size)
- goto gethvmcontext_out;
-
- /* Allocate our own marshalling buffer */
- ret = -ENOMEM;
- c.data = xmalloc_bytes(c.size);
- if (c.data == NULL)
- goto gethvmcontext_out;
-
- domain_pause(d);
- ret = hvm_save(d, &c);
- domain_unpause(d);
-
- op->u.hvmcontext.size = c.cur;
- if (copy_to_guest(op->u.hvmcontext.buffer, c.data, c.size) != 0)
- ret = -EFAULT;
-
- gethvmcontext_out:
- if (copy_to_guest(u_domctl, op, 1))
- ret = -EFAULT;
-
- if (c.data != NULL)
- xfree(c.data);
-
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_set_opt_feature:
- {
- struct xen_ia64_opt_feature *optf = &op->u.set_opt_feature.optf;
- struct domain *d = rcu_lock_domain_by_id(op->domain);
-
- if (d == NULL) {
- ret = -EINVAL;
- break;
- }
-
- ret = -EPERM;
- if ( IS_PRIV_FOR(current->domain, d) )
- ret = domain_opt_feature(d, optf);
-
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_set_address_size:
- {
- struct domain *d = rcu_lock_domain_by_id(op->domain);
-
- ret = -ESRCH;
- if (d == NULL)
- break;
-
- ret = -EINVAL;
- if (op->u.address_size.size == BITS_PER_LONG)
- ret = 0;
-
- rcu_unlock_domain(d);
- }
- break;
-
- case XEN_DOMCTL_get_address_size:
- {
- struct domain *d = rcu_lock_domain_by_id(op->domain);
-
- ret = -ESRCH;
- if (d == NULL)
- break;
-
- ret = 0;
- op->u.address_size.size = BITS_PER_LONG;
- rcu_unlock_domain(d);
-
- if (copy_to_guest(u_domctl, op, 1))
- ret = -EFAULT;
- }
- break;
-
- case XEN_DOMCTL_mem_sharing_op:
- {
- xen_domctl_mem_sharing_op_t *mec = &op->u.mem_sharing_op;
- struct domain *d = rcu_lock_domain_by_id(op->domain);
-
- ret = -ESRCH;
- if (d == NULL)
- break;
-
- switch(mec->op)
- {
- case XEN_DOMCTL_MEM_SHARING_CONTROL:
- {
- if (mec->u.enable) {
- ret = -EINVAL; /* not implemented */
- break;
- }
- ret = 0;
- }
- break;
-
- default:
- ret = -ENOSYS;
- }
-
- rcu_unlock_domain(d);
- }
- break;
-
- default:
- ret = iommu_do_domctl(op, u_domctl);
- break;
-
- }
-
- return ret;
-}
-
-long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
-{
- long ret = 0;
-
- switch ( op->cmd )
- {
- case XEN_SYSCTL_physinfo:
- {
- xen_sysctl_physinfo_t *pi = &op->u.physinfo;
-
- memset(pi, 0, sizeof(*pi));
- pi->threads_per_core = cpumask_weight(per_cpu(cpu_sibling_mask, 0));
- pi->cores_per_socket =
- cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
- pi->nr_nodes = (u32)num_online_nodes();
- pi->nr_cpus = (u32)num_online_cpus();
- pi->total_pages = total_pages;
- pi->free_pages = avail_domheap_pages();
- pi->scrub_pages = 0;
- pi->cpu_khz = local_cpu_data->proc_freq / 1000;
-
- pi->max_node_id = MAX_NUMNODES-1;
- pi->max_cpu_id = NR_CPUS-1;
-
- if ( copy_field_to_guest(u_sysctl, op, u.physinfo) )
- ret = -EFAULT;
- }
- break;
-
- case XEN_SYSCTL_topologyinfo:
- {
- xen_sysctl_topologyinfo_t *ti = &op->u.topologyinfo;
- XEN_GUEST_HANDLE_64(uint32) arr;
- uint32_t i, val, max_array_ent = ti->max_cpu_index;
-
- ti->max_cpu_index = cpumask_last(&cpu_online_map);
- max_array_ent = min(max_array_ent, ti->max_cpu_index);
-
- arr = ti->cpu_to_core;
- if ( !guest_handle_is_null(arr) )
- {
- for ( i = 0; ret == 0 && i <= max_array_ent; i++ )
- {
- val = cpu_online(i) ? cpu_to_core(i) : ~0u;
- if ( copy_to_guest_offset(arr, i, &val, 1) )
- ret = -EFAULT;
- }
- }
-
- arr = ti->cpu_to_socket;
- if ( !guest_handle_is_null(arr) )
- {
- for ( i = 0; ret == 0 && i <= max_array_ent; i++ )
- {
- val = cpu_online(i) ? cpu_to_socket(i) : ~0u;
- if ( copy_to_guest_offset(arr, i, &val, 1) )
- ret = -EFAULT;
- }
- }
-
- arr = ti->cpu_to_node;
- if ( !guest_handle_is_null(arr) )
- {
- for ( i = 0; ret == 0 && i <= max_array_ent; i++ )
- {
- val = cpu_online(i) ? cpu_to_node(i) : ~0u;
- if ( copy_to_guest_offset(arr, i, &val, 1) )
- ret = -EFAULT;
- }
- }
-
- if ( copy_field_to_guest(u_sysctl, op, u.topologyinfo.max_cpu_index) )
- ret = -EFAULT;
- }
- break;
-
- case XEN_SYSCTL_numainfo:
- {
- uint32_t i, j, max_node_index, last_online_node;
- xen_sysctl_numainfo_t *ni = &op->u.numainfo;
-
- last_online_node = last_node(node_online_map);
- max_node_index = min_t(uint32_t, ni->max_node_index, last_online_node);
- ni->max_node_index = last_online_node;
-
- for (i = 0; i <= max_node_index; i++) {
- if (!guest_handle_is_null(ni->node_to_memsize)) {
- uint64_t memsize = node_online(i) ?
- node_memblk[i].size << PAGE_SHIFT : 0ul;
- if (copy_to_guest_offset(ni->node_to_memsize, i, &memsize, 1))
- break;
- }
- if (!guest_handle_is_null(ni->node_to_memfree)) {
- uint64_t memfree = node_online(i) ?
- avail_node_heap_pages(i) << PAGE_SHIFT : 0ul;
- if (copy_to_guest_offset(ni->node_to_memfree, i, &memfree, 1))
- break;
- }
-
- if (!guest_handle_is_null(ni->node_to_node_distance)) {
- for (j = 0; j <= max_node_index; j++) {
- uint32_t distance = ~0u;
- if (node_online(i) && node_online (j))
- distance = node_distance(i, j);
-
- if (copy_to_guest_offset(
- ni->node_to_node_distance,
- i*(max_node_index+1) + j, &distance, 1))
- break;
- }
- if (j <= max_node_index)
- break;
- }
- }
-
- ret = ((i <= max_node_index) || copy_to_guest(u_sysctl, op, 1))
- ? -EFAULT : 0;
- }
- break;
-
- default:
- printk("arch_do_sysctl: unrecognized sysctl: %d!!!\n",op->cmd);
- ret = -ENOSYS;
-
- }
-
- return ret;
-}
-
-static unsigned long
-dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
-{
- unsigned long end;
-
- /* Linux may use a 0 size! */
- if (size == 0) {
- printk(XENLOG_WARNING "ioremap(): Trying to map %lx, size 0\n", mpaddr);
- size = PAGE_SIZE;
- }
-
- end = PAGE_ALIGN(mpaddr + size);
-
- if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
- (end >> PAGE_SHIFT) - 1))
- return -EPERM;
-
- return assign_domain_mmio_page(d, mpaddr, mpaddr, size,
- ASSIGN_writable | ASSIGN_nocache);
-}
-
-static unsigned long
-dom0vp_fpswa_revision(XEN_GUEST_HANDLE(uint) revision)
-{
- if (fpswa_interface == NULL)
- return -ENOSYS;
- if (copy_to_guest(revision, &fpswa_interface->revision, 1))
- return -EFAULT;
- return 0;
-}
-
-static unsigned long
-dom0vp_add_io_space(struct domain *d, unsigned long phys_base,
- unsigned long sparse, unsigned long space_number)
-{
- unsigned int fp, lp;
-
- /*
- * Registering new io_space roughly based on linux
- * arch/ia64/pci/pci.c:new_space()
- */
-
- /* Skip legacy I/O port space, we already know about it */
- if (phys_base == 0)
- return 0;
-
- /*
- * Dom0 Linux initializes io spaces sequentially; if that changes,
- * we'll need to add thread protection and the ability to handle
- * a sparsely populated io_space array.
- */
- if (space_number > MAX_IO_SPACES || space_number != num_io_spaces)
- return -EINVAL;
-
- io_space[space_number].mmio_base = phys_base;
- io_space[space_number].sparse = sparse;
-
- num_io_spaces++;
-
- fp = space_number << IO_SPACE_BITS;
- lp = fp | 0xffff;
-
- return ioports_permit_access(d, fp, fp, lp);
-}
-
-unsigned long
-do_dom0vp_op(unsigned long cmd,
- unsigned long arg0, unsigned long arg1, unsigned long arg2,
- unsigned long arg3)
-{
- unsigned long ret = 0;
- struct domain *d = current->domain;
-
- switch (cmd) {
- case IA64_DOM0VP_ioremap:
- ret = dom0vp_ioremap(d, arg0, arg1);
- break;
- case IA64_DOM0VP_phystomach:
- ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
- if (ret == INVALID_MFN) {
- dprintk(XENLOG_INFO, "%s: INVALID_MFN ret: 0x%lx\n",
- __func__, ret);
- } else {
- ret = pte_pfn(__pte(ret));
- }
- perfc_incr(dom0vp_phystomach);
- break;
- case IA64_DOM0VP_machtophys:
- if (!mfn_valid(arg0)) {
- ret = INVALID_M2P_ENTRY;
- break;
- }
- ret = get_gpfn_from_mfn(arg0);
- perfc_incr(dom0vp_machtophys);
- break;
- case IA64_DOM0VP_zap_physmap:
- ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
- break;
- case IA64_DOM0VP_add_physmap:
- if (!IS_PRIV(d))
- return -EPERM;
- ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
- (domid_t)arg3);
- break;
- case IA64_DOM0VP_add_physmap_with_gmfn:
- if (!IS_PRIV(d))
- return -EPERM;
- ret = dom0vp_add_physmap_with_gmfn(d, arg0, arg1, (unsigned int)arg2,
- (domid_t)arg3);
- break;
- case IA64_DOM0VP_expose_p2m:
- ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
- break;
- case IA64_DOM0VP_perfmon: {
- XEN_GUEST_HANDLE(void) hnd;
- set_xen_guest_handle(hnd, (void*)arg1);
- ret = do_perfmon_op(arg0, hnd, arg2);
- break;
- }
- case IA64_DOM0VP_fpswa_revision: {
- XEN_GUEST_HANDLE(uint) hnd;
- set_xen_guest_handle(hnd, (uint*)arg0);
- ret = dom0vp_fpswa_revision(hnd);
- break;
- }
- case IA64_DOM0VP_add_io_space:
- ret = dom0vp_add_io_space(d, arg0, arg1, arg2);
- break;
- case IA64_DOM0VP_expose_foreign_p2m: {
- XEN_GUEST_HANDLE(char) hnd;
- set_xen_guest_handle(hnd, (char*)arg2);
- ret = dom0vp_expose_foreign_p2m(d, arg0, (domid_t)arg1, hnd, arg3);
- break;
- }
- case IA64_DOM0VP_unexpose_foreign_p2m:
- ret = dom0vp_unexpose_foreign_p2m(d, arg0, arg1);
- break;
- case IA64_DOM0VP_get_memmap: {
- XEN_GUEST_HANDLE(char) hnd;
- set_xen_guest_handle(hnd, (char*)arg1);
- ret = dom0vp_get_memmap((domid_t)arg0, hnd);
- break;
- }
- default:
- ret = -1;
- printk("unknown dom0_vp_op 0x%lx\n", cmd);
- break;
- }
-
- return ret;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/ia64/xen/dom_fw_asm.S b/xen/arch/ia64/xen/dom_fw_asm.S
deleted file mode 100644
index 560f94e621..0000000000
--- a/xen/arch/ia64/xen/dom_fw_asm.S
+++ /dev/null
@@ -1,43 +0,0 @@
-#include <asm/dom_fw.h>
-
-// moved from xenasm.S to be shared by xen and libxc
-/*
- * Assembly support routines for Xen/ia64
- *
- * Copyright (C) 2004 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- */
-
-// These instructions are copied in the domains.
-// This is the virtual PAL, which simply does a hypercall.
-// The size is 2 bundles (32 bytes). It handles both the static and the
-// stacked calling conventions.
-// If you modify this code, you have to modify dom_fw.h (for the size) and
-// dom_fw_pal_hypercall_patch.
-
-// This file is shared with xen and libxc.
-// GLOBAL_ENTRY() and END() macros can't be used.
-
-//GLOBAL_ENTRY(xen_ia64_pal_call_stub)
-.global xen_ia64_pal_call_stub;
-.align 32;
-.proc xen_ia64_pal_call_stub;
-xen_ia64_pal_call_stub:
- {
- .mii
- addl r2=FW_HYPERCALL_PAL_CALL_ASM,r0 // Hypercall number (Value is patched).
- mov r9=256
- ;;
-    cmp.gtu p7,p8=r9,r28 /* r28 <= 255? */
- }
- {
- .mbb
- break __IA64_XEN_HYPERCALL_DEFAULT // Hypercall vector (Value is patched).
-(p7) br.cond.sptk.few rp
-(p8) br.ret.sptk.few rp
- }
-//END(xen_ia64_pal_call_stub)
-.endp xen_ia64_pal_call_stub
diff --git a/xen/arch/ia64/xen/dom_fw_common.c b/xen/arch/ia64/xen/dom_fw_common.c
deleted file mode 100644
index 78ef785b04..0000000000
--- a/xen/arch/ia64/xen/dom_fw_common.c
+++ /dev/null
@@ -1,706 +0,0 @@
-/*
- * Xen domain firmware emulation support
- * Copyright (C) 2004 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- * Copyright (c) 2006, 2007
- * Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * dom0 vp model support
- */
-
-#ifdef __XEN__
-#include <asm/system.h>
-#include <asm/dom_fw_dom0.h>
-#include <asm/dom_fw_utils.h>
-#else
-#include <string.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <inttypes.h>
-
-#include <xen/xen.h>
-#include <asm/bundle.h>
-
-#include "xg_private.h"
-#include "xc_dom.h"
-#include "ia64/xc_dom_ia64_util.h"
-
-#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
-#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS /* XXX */
-#endif /* __XEN__ */
-
-#include <xen/acpi.h>
-#include <acpi/actables.h>
-#include <asm/dom_fw.h>
-#include <asm/dom_fw_domu.h>
-
-void
-xen_ia64_efi_make_md(efi_memory_desc_t *md,
- uint32_t type, uint64_t attr,
- uint64_t start, uint64_t end)
-{
- md->type = type;
- md->pad = 0;
- md->phys_addr = start;
- md->virt_addr = 0;
- md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
- md->attribute = attr;
-}
-
-#define EFI_HYPERCALL_PATCH(tgt, call) \
- do { \
- dom_efi_hypercall_patch(brkimm, \
- FW_HYPERCALL_##call##_PADDR, \
- FW_HYPERCALL_##call, hypercalls_imva); \
- /* Descriptor address. */ \
- tables->efi_runtime.tgt = \
- FW_FIELD_MPA(func_ptrs) + 8 * pfn; \
- /* Descriptor. */ \
- tables->func_ptrs[pfn++] = FW_HYPERCALL_##call##_PADDR; \
- tables->func_ptrs[pfn++] = 0; \
- } while (0)
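-
-/*
- * Sketch (illustration only): each func_ptrs pair written by
- * EFI_HYPERCALL_PATCH forms an ia64 function descriptor - an entry
- * address followed by a gp value, which these stubs leave as 0.
- */
-struct ia64_fdesc_sketch {
-        uint64_t ip;    /* entry point: the hypercall patch paddr */
-        uint64_t gp;    /* global pointer; unused (0) for the stubs */
-};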
-
-/**************************************************************************
-Hypercall bundle creation
-**************************************************************************/
-
-static void
-build_hypercall_bundle(uint64_t *imva, uint64_t brkimm, uint64_t hypnum, uint64_t ret)
-{
- INST64_A5 slot0;
- INST64_I19 slot1;
- INST64_B4 slot2;
- IA64_BUNDLE bundle;
-
-    // slot0: mov r2 = hypnum (low 20 bits)
- slot0.inst = 0;
- slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
- slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
- slot0.imm5c = hypnum >> 16; slot0.s = 0;
- // slot1: break brkimm
- slot1.inst = 0;
- slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
- slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
- // if ret slot2: br.ret.sptk.many rp
- // else slot2: br.cond.sptk.many rp
- slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
- slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
- if (ret) {
- slot2.btype = 4; slot2.x6 = 0x21;
- }
- else {
- slot2.btype = 0; slot2.x6 = 0x20;
- }
-
- bundle.i64[0] = 0; bundle.i64[1] = 0;
- bundle.template = 0x11;
- bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
- bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;
-
- imva[0] = bundle.i64[0]; imva[1] = bundle.i64[1];
- ia64_fc(imva);
- ia64_fc(imva + 1);
-}
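-
-/*
- * Sketch (illustration only) of the 128-bit bundle layout implied by
- * the slot1a/slot1b split above: a 5-bit template plus three 41-bit
- * slots, with slot1 straddling the two 64-bit words (18 + 23 bits).
- */
-static inline void
-pack_bundle_sketch(uint64_t *lo, uint64_t *hi, uint8_t tmpl,
-                   uint64_t slot0, uint64_t slot1, uint64_t slot2)
-{
-        *lo = (tmpl & 0x1f)                        /* bits 0..4  */
-              | ((slot0 & 0x1ffffffffffUL) << 5)   /* bits 5..45 */
-              | (slot1 << 46);                     /* low 18 bits of slot1 */
-        *hi = ((slot1 >> 18) & 0x7fffff)           /* high 23 bits of slot1 */
-              | ((slot2 & 0x1ffffffffffUL) << 23); /* bits 23..63 */
-}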
-
-static void
-build_pal_hypercall_bundles(uint64_t *imva, uint64_t brkimm, uint64_t hypnum)
-{
- extern unsigned long xen_ia64_pal_call_stub[];
- IA64_BUNDLE bundle;
- INST64_A5 slot_a5;
- INST64_M37 slot_m37;
-
- /*
- * The source of the hypercall stub is
- * the xen_ia64_pal_call_stub function defined in dom_fw_asm.S.
- */
-
- /* Copy the first bundle and patch the hypercall number. */
- bundle.i64[0] = xen_ia64_pal_call_stub[0];
- bundle.i64[1] = xen_ia64_pal_call_stub[1];
- slot_a5.inst = bundle.slot0;
- slot_a5.imm7b = hypnum;
- slot_a5.imm9d = hypnum >> 7;
- slot_a5.imm5c = hypnum >> 16;
- bundle.slot0 = slot_a5.inst;
- imva[0] = bundle.i64[0];
- imva[1] = bundle.i64[1];
- ia64_fc(imva);
- ia64_fc(imva + 1);
-
- /* Copy the second bundle and patch the hypercall vector. */
- bundle.i64[0] = xen_ia64_pal_call_stub[2];
- bundle.i64[1] = xen_ia64_pal_call_stub[3];
- slot_m37.inst = bundle.slot0;
- slot_m37.imm20a = brkimm;
- slot_m37.i = brkimm >> 20;
- bundle.slot0 = slot_m37.inst;
- imva[2] = bundle.i64[0];
- imva[3] = bundle.i64[1];
- ia64_fc(imva + 2);
- ia64_fc(imva + 3);
-}
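-
-/*
- * Sketch (illustration only): how a 22-bit A5 "mov rN = imm22"
- * immediate splits into the fields patched above.
- */
-static inline void
-split_imm22_sketch(uint32_t imm, uint32_t *imm7b, uint32_t *imm9d,
-                   uint32_t *imm5c, uint32_t *s)
-{
-        *imm7b = imm & 0x7f;            /* bits 0..6   */
-        *imm9d = (imm >> 7) & 0x1ff;    /* bits 7..15  */
-        *imm5c = (imm >> 16) & 0x1f;    /* bits 16..20 */
-        *s     = (imm >> 21) & 0x1;     /* sign bit    */
-}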
-
-/* xen fpswa call stub. 14 bundles */
-extern const unsigned long xen_ia64_fpswa_call_stub[];
-extern const unsigned long xen_ia64_fpswa_call_stub_end[];
-extern const unsigned long xen_ia64_fpswa_call_stub_patch[];
-asm(
- ".align 32\n"
- ".proc xen_ia64_fpswa_call_stub;\n"
- "xen_ia64_fpswa_call_stub:\n"
- ".prologue\n"
- "alloc r3 = ar.pfs, 8, 0, 0, 0\n"
- ".body\n"
- "mov r14 = in0\n"
- "ld8 r15 = [in1], 8\n"
- ";;\n"
- "ld8 r16 = [in1]\n"
- "ld8 r17 = [in2]\n"
- "ld8 r18 = [in3]\n"
- "ld8 r19 = [in4]\n"
- "ld8 r20 = [in5]\n"
- "ld8 r21 = [in6]\n"
- "ld8 r22 = [in7], 8\n"
- ";;\n"
- "ld8 r23 = [in7], 8\n"
- ";;\n"
- "ld8 r24 = [in7], 8\n"
- ";;\n"
- "cmp.ne p6, p0 = r24, r0\n"
- "ld8 r25 = [in7], 8\n"
- ";;\n"
- "(p6) tpa r24 = r24\n"
- "cmp.ne p7, p0 = r25, r0\n"
- "ld8 r26 = [in7], 8\n"
- ";;\n"
-    "(p7) tpa r25 = r25\n"
- "cmp.ne p8, p0 = r26, r0\n"
- "ld8 r27 = [in7], 8\n"
- ";;\n"
-    "(p8) tpa r26 = r26\n"
- "cmp.ne p9, p0 = r27, r0\n"
- ";;\n"
- "tpa r27 = r27\n"
- "xen_ia64_fpswa_call_stub_patch:"
- "{\n"
- "mov r2 = " FW_HYPERCALL_FPSWA_STR "\n"
- "break " __IA64_XEN_HYPERCALL_DEFAULT_STR "\n"
- "nop.i 0\n"
- "}\n"
- "st8 [in2] = r17\n"
- "st8 [in3] = r18\n"
- "st8 [in4] = r19\n"
- "st8 [in5] = r20\n"
- "st8 [in6] = r21\n"
- "br.ret.sptk.many rp\n"
- "xen_ia64_fpswa_call_stub_end:"
- ".endp xen_ia64_fpswa_call_stub\n"
-);
-
-static void
-build_fpswa_hypercall_bundle(uint64_t *imva, uint64_t brkimm, uint64_t hypnum)
-{
- INST64_A5 slot0;
- INST64_I19 slot1;
- INST64_I18 slot2;
- IA64_BUNDLE bundle;
-
- /* slot0: mov r2 = hypnum (low 20 bits) */
- slot0.inst = 0;
- slot0.qp = 0;
- slot0.r1 = 2;
- slot0.r3 = 0;
- slot0.major = 0x9;
-
- slot0.s = 0;
- slot0.imm9d = hypnum >> 7;
- slot0.imm5c = hypnum >> 16;
- slot0.imm7b = hypnum;
-
- /* slot1: break brkimm */
- slot1.inst = 0;
- slot1.qp = 0;
- slot1.x6 = 0;
- slot1.x3 = 0;
- slot1.major = 0x0;
- slot1.i = brkimm >> 20;
- slot1.imm20 = brkimm;
-
- /* slot2: nop.i */
- slot2.inst = 0;
- slot2.qp = 0;
- slot2.imm20 = 0;
- slot2.y = 0;
- slot2.x6 = 1;
- slot2.x3 = 0;
- slot2.i = 0;
- slot2.major = 0;
-
- /* MII bundle */
- bundle.i64[0] = 0;
- bundle.i64[1] = 0;
- bundle.template = 0x0; /* MII */
- bundle.slot0 = slot0.inst;
- bundle.slot1a = slot1.inst;
- bundle.slot1b = slot1.inst >> 18;
- bundle.slot2 = slot2.inst;
-
- imva[0] = bundle.i64[0];
- imva[1] = bundle.i64[1];
- ia64_fc(imva);
- ia64_fc(imva + 1);
-}
-
-// builds the FPSWA call stub at domain physical address
-static void
-dom_fpswa_hypercall_patch(uint64_t brkimm, unsigned long imva)
-{
- unsigned long *entry_imva, *patch_imva;
- const unsigned long entry_paddr = FW_HYPERCALL_FPSWA_ENTRY_PADDR;
- const unsigned long patch_paddr = FW_HYPERCALL_FPSWA_PATCH_PADDR;
- const size_t stub_size =
- (char*)xen_ia64_fpswa_call_stub_end -
- (char*)xen_ia64_fpswa_call_stub;
- size_t i;
-
- entry_imva = (unsigned long *)(imva + entry_paddr -
- FW_HYPERCALL_BASE_PADDR);
- patch_imva = (unsigned long *)(imva + patch_paddr -
- FW_HYPERCALL_BASE_PADDR);
-
- /* Descriptor. */
- *entry_imva++ = patch_paddr;
- *entry_imva = 0;
-
- /* see dom_fw.h */
- BUG_ON((char*)xen_ia64_fpswa_call_stub_end -
- (char*)xen_ia64_fpswa_call_stub > 0xff - 16 + 1);
-
- /* call stub */
- memcpy(patch_imva, xen_ia64_fpswa_call_stub, stub_size);
-    for (i = 0; i < stub_size; i++)
-        ia64_fc((char *)patch_imva + i);
- patch_imva +=
- xen_ia64_fpswa_call_stub_patch - xen_ia64_fpswa_call_stub;
- build_fpswa_hypercall_bundle(patch_imva, brkimm, FW_HYPERCALL_FPSWA);
-}
-
-// builds a hypercall bundle at domain physical address
-static void
-dom_efi_hypercall_patch(uint64_t brkimm, unsigned long paddr,
- unsigned long hypercall, unsigned long imva)
-{
- build_hypercall_bundle((uint64_t *)(imva + paddr -
- FW_HYPERCALL_BASE_PADDR),
- brkimm, hypercall, 1);
-}
-
-// builds a hypercall bundle at domain physical address
-static void
-dom_fw_hypercall_patch(uint64_t brkimm, unsigned long paddr,
-                       unsigned long hypercall, unsigned long ret,
- unsigned long imva)
-{
- build_hypercall_bundle((uint64_t *)(imva + paddr -
- FW_HYPERCALL_BASE_PADDR),
- brkimm, hypercall, ret);
-}
-
-static void
-dom_fw_pal_hypercall_patch(uint64_t brkimm, unsigned long paddr, unsigned long imva)
-{
- build_pal_hypercall_bundles((uint64_t*)(imva + paddr -
- FW_HYPERCALL_BASE_PADDR),
- brkimm, FW_HYPERCALL_PAL_CALL);
-}
-
-static inline void
-#ifdef __XEN__
-print_md(efi_memory_desc_t *md)
-#else
-print_md(xc_interface *xch, efi_memory_desc_t *md)
-#endif
-{
- uint64_t size;
-
- printk(XENLOG_INFO "dom mem: type=%2u, attr=0x%016lx, "
- "range=[0x%016lx-0x%016lx) ",
- md->type, md->attribute, md->phys_addr,
- md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT));
-
- size = md->num_pages << EFI_PAGE_SHIFT;
- if (size > ONE_MB)
- printk("(%luMB)\n", size >> 20);
- else
- printk("(%luKB)\n", size >> 10);
-}
-
-struct fake_acpi_tables {
- struct acpi_table_rsdp rsdp;
- struct acpi_table_xsdt xsdt;
- uint64_t madt_ptr;
- struct acpi_table_fadt fadt;
- struct acpi_table_facs facs;
- struct acpi_table_header dsdt;
- uint8_t aml[8 + 11 * MAX_VIRT_CPUS];
- struct acpi_table_madt madt;
- struct acpi_madt_local_sapic lsapic[MAX_VIRT_CPUS];
- uint8_t pm1a_event_block[4];
- uint8_t pm1a_control_block[1];
- uint8_t pm_timer_block[4];
-};
-#define ACPI_TABLE_MPA(field) \
-    (FW_ACPI_BASE_PADDR + offsetof(struct fake_acpi_tables, field))
-
-/* Create enough of an ACPI structure to make the guest OS ACPI happy. */
-void
-dom_fw_fake_acpi(domain_t *d, struct fake_acpi_tables *tables)
-{
- struct acpi_table_rsdp *rsdp = &tables->rsdp;
- struct acpi_table_xsdt *xsdt = &tables->xsdt;
- struct acpi_table_fadt *fadt = &tables->fadt;
- struct acpi_table_facs *facs = &tables->facs;
- struct acpi_table_header *dsdt = &tables->dsdt;
- struct acpi_table_madt *madt = &tables->madt;
- struct acpi_madt_local_sapic *lsapic = tables->lsapic;
- int i;
- int aml_len;
- int nbr_cpus;
-
- BUILD_BUG_ON(sizeof(struct fake_acpi_tables) >
- (FW_ACPI_END_PADDR - FW_ACPI_BASE_PADDR));
-
- memset(tables, 0, sizeof(struct fake_acpi_tables));
-
- /* setup XSDT (64bit version of RSDT) */
- memcpy(xsdt->header.signature, ACPI_SIG_XSDT,
- sizeof(xsdt->header.signature));
- /* XSDT points to both the FADT and the MADT, so add one entry */
- xsdt->header.length = sizeof(struct acpi_table_xsdt) + sizeof(uint64_t);
- xsdt->header.revision = 1;
- memcpy(xsdt->header.oem_id, "XEN", 3);
- memcpy(xsdt->header.oem_table_id, "Xen/ia64", 8);
- memcpy(xsdt->header.asl_compiler_id, "XEN", 3);
- xsdt->header.asl_compiler_revision = xen_ia64_version(d);
-
- xsdt->table_offset_entry[0] = ACPI_TABLE_MPA(fadt);
- tables->madt_ptr = ACPI_TABLE_MPA(madt);
-
- xsdt->header.checksum = -acpi_tb_checksum((u8*)xsdt,
- xsdt->header.length);
-
- /* setup FADT */
- memcpy(fadt->header.signature, ACPI_SIG_FADT,
- sizeof(fadt->header.signature));
- fadt->header.length = sizeof(struct acpi_table_fadt);
- fadt->header.revision = FADT2_REVISION_ID;
- memcpy(fadt->header.oem_id, "XEN", 3);
- memcpy(fadt->header.oem_table_id, "Xen/ia64", 8);
- memcpy(fadt->header.asl_compiler_id, "XEN", 3);
- fadt->header.asl_compiler_revision = xen_ia64_version(d);
-
- memcpy(facs->signature, ACPI_SIG_FACS, sizeof(facs->signature));
- facs->version = 1;
- facs->length = sizeof(struct acpi_table_facs);
-
- fadt->Xfacs = ACPI_TABLE_MPA(facs);
- fadt->Xdsdt = ACPI_TABLE_MPA(dsdt);
-
- /*
-     * All of the below FADT entries are filled in to prevent warnings
- * from sanity checks in the ACPI CA. Emulate required ACPI hardware
- * registers in system memory.
- */
- fadt->pm1_event_length = 4;
- fadt->xpm1a_event_block.space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
- fadt->xpm1a_event_block.bit_width = 8;
- fadt->xpm1a_event_block.address = ACPI_TABLE_MPA(pm1a_event_block);
- fadt->pm1_control_length = 1;
- fadt->xpm1a_control_block.space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
- fadt->xpm1a_control_block.bit_width = 8;
- fadt->xpm1a_control_block.address = ACPI_TABLE_MPA(pm1a_control_block);
- fadt->pm_timer_length = 4;
- fadt->xpm_timer_block.space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
- fadt->xpm_timer_block.bit_width = 8;
- fadt->xpm_timer_block.address = ACPI_TABLE_MPA(pm_timer_block);
-
- fadt->header.checksum = -acpi_tb_checksum((u8*)fadt,
- fadt->header.length);
-
- /* setup RSDP */
- memcpy(rsdp->signature, ACPI_SIG_RSDP, strlen(ACPI_SIG_RSDP));
- memcpy(rsdp->oem_id, "XEN", 3);
- rsdp->revision = 2; /* ACPI 2.0 includes XSDT */
- rsdp->length = sizeof(struct acpi_table_rsdp);
- rsdp->xsdt_physical_address = ACPI_TABLE_MPA(xsdt);
-
- rsdp->checksum = -acpi_tb_checksum((u8*)rsdp,
- ACPI_RSDP_CHECKSUM_LENGTH);
- rsdp->extended_checksum = -acpi_tb_checksum((u8*)rsdp, rsdp->length);
-
- /* setup DSDT with trivial namespace. */
- memcpy(dsdt->signature, ACPI_SIG_DSDT, strlen(ACPI_SIG_DSDT));
- dsdt->revision = 1;
- memcpy(dsdt->oem_id, "XEN", 3);
- memcpy(dsdt->oem_table_id, "Xen/ia64", 8);
- memcpy(dsdt->asl_compiler_id, "XEN", 3);
- dsdt->asl_compiler_revision = xen_ia64_version(d);
-
- /* Trivial namespace, avoids ACPI CA complaints */
- tables->aml[0] = 0x10; /* Scope */
- tables->aml[1] = 0x40; /* length/offset to next object (patched) */
- tables->aml[2] = 0x00;
- memcpy(&tables->aml[3], "_SB_", 4);
-
-    /* The processor object isn't absolutely necessary, revisit for SMP */
- aml_len = 7;
- for (i = 0; i < 3; i++) {
- unsigned char *p = tables->aml + aml_len;
- p[0] = 0x5b; /* processor object */
- p[1] = 0x83;
- p[2] = 0x0b; /* next */
- p[3] = 'C';
- p[4] = 'P';
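-        /* two hex digits of i; for i < 16 the leading '0' is
-         * overwritten with 'U' below, yielding "CPU0".."CPUf" */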
-        snprintf((char *)p + 5, 3, "%02x", i);
- if (i < 16)
- p[5] = 'U';
- p[7] = i; /* acpi_id */
- p[8] = 0; /* pblk_addr */
- p[9] = 0;
- p[10] = 0;
- p[11] = 0;
- p[12] = 0; /* pblk_len */
- aml_len += 13;
- }
- tables->aml[1] = 0x40 + ((aml_len - 1) & 0x0f);
- tables->aml[2] = (aml_len - 1) >> 4;
- dsdt->length = sizeof(struct acpi_table_header) + aml_len;
- dsdt->checksum = -acpi_tb_checksum((u8*)dsdt, dsdt->length);
-
- /* setup MADT */
- memcpy(madt->header.signature, ACPI_SIG_MADT,
- sizeof(madt->header.signature));
- madt->header.revision = 2;
- memcpy(madt->header.oem_id, "XEN", 3);
- memcpy(madt->header.oem_table_id, "Xen/ia64", 8);
- memcpy(madt->header.asl_compiler_id, "XEN", 3);
- madt->header.asl_compiler_revision = xen_ia64_version(d);
-
- /* An LSAPIC entry describes a CPU. */
- nbr_cpus = 0;
- for (i = 0; i < MAX_VIRT_CPUS; i++) {
- lsapic[i].header.type = ACPI_MADT_TYPE_LOCAL_SAPIC;
- lsapic[i].header.length = sizeof(lsapic[i]);
- lsapic[i].processor_id = i;
- lsapic[i].id = i;
- lsapic[i].eid = 0;
- if (xen_ia64_is_vcpu_allocated(d, i)) {
- lsapic[i].lapic_flags = ACPI_MADT_ENABLED;
- nbr_cpus++;
- }
- }
- madt->header.length = sizeof(struct acpi_table_madt) +
- nbr_cpus * sizeof(*lsapic);
- madt->header.checksum = -acpi_tb_checksum((u8*)madt,
- madt->header.length);
- return;
-}
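-
-/*
- * Sketch (illustration only): the "-acpi_tb_checksum(...)" pattern
- * used above picks the checksum byte so that the byte sum of the
- * whole table is zero, which is what ACPI validators verify. With
- * the checksum field zeroed first:
- */
-static inline uint8_t acpi_checksum_sketch(const uint8_t *p, size_t len)
-{
-        uint8_t sum = 0;
-        while (len--)
-                sum += *p++;
-        return (uint8_t)-sum;   /* store this; the bytes then sum to 0 */
-}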
-
-int
-efi_mdt_cmp(const void *a, const void *b)
-{
- const efi_memory_desc_t *x = a, *y = b;
-
- if (x->phys_addr > y->phys_addr)
- return 1;
- if (x->phys_addr < y->phys_addr)
- return -1;
-
- /* num_pages == 0 is allowed. */
- if (x->num_pages > y->num_pages)
- return 1;
- if (x->num_pages < y->num_pages)
- return -1;
-
- return 0;
-}
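-
-/*
- * Usage note (illustration only): efi_mdt_cmp is a sort()-style
- * comparator; callers below invoke it as
- *     sort(tables->efi_memmap, tables->num_mds,
- *          sizeof(efi_memory_desc_t), efi_mdt_cmp, NULL);
- * leaving descriptors ordered by phys_addr, then by num_pages.
- */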
-
-int
-dom_fw_init(domain_t *d,
- uint64_t brkimm,
- struct xen_ia64_boot_param *bp,
- struct fw_tables *tables,
- unsigned long hypercalls_imva,
- unsigned long maxmem)
-{
- unsigned long pfn;
- unsigned char checksum;
- char *cp;
- int num_mds, i;
- int fpswa_supported = 0;
-
- /* Caller must zero-clear fw_tables */
-
- /* EFI systab. */
- tables->efi_systab.hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
- tables->efi_systab.hdr.revision = ((1 << 16) | 00); /* EFI 1.00 */
- tables->efi_systab.hdr.headersize = sizeof(tables->efi_systab.hdr);
-
-    memcpy(tables->fw_vendor, FW_VENDOR, sizeof(FW_VENDOR));
- tables->efi_systab.fw_vendor = FW_FIELD_MPA(fw_vendor);
- tables->efi_systab.fw_revision = 1;
- tables->efi_systab.runtime = (void *)FW_FIELD_MPA(efi_runtime);
- tables->efi_systab.nr_tables = NUM_EFI_SYS_TABLES;
- tables->efi_systab.tables = FW_FIELD_MPA(efi_tables);
-
- /* EFI runtime. */
- tables->efi_runtime.hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
- tables->efi_runtime.hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
- tables->efi_runtime.hdr.headersize = sizeof(tables->efi_runtime.hdr);
-
- pfn = 0;
- EFI_HYPERCALL_PATCH(get_time,EFI_GET_TIME);
- EFI_HYPERCALL_PATCH(set_time,EFI_SET_TIME);
- EFI_HYPERCALL_PATCH(get_wakeup_time,EFI_GET_WAKEUP_TIME);
- EFI_HYPERCALL_PATCH(set_wakeup_time,EFI_SET_WAKEUP_TIME);
- EFI_HYPERCALL_PATCH(set_virtual_address_map,
- EFI_SET_VIRTUAL_ADDRESS_MAP);
- EFI_HYPERCALL_PATCH(get_variable,EFI_GET_VARIABLE);
- EFI_HYPERCALL_PATCH(get_next_variable,EFI_GET_NEXT_VARIABLE);
- EFI_HYPERCALL_PATCH(set_variable,EFI_SET_VARIABLE);
- EFI_HYPERCALL_PATCH(get_next_high_mono_count,
- EFI_GET_NEXT_HIGH_MONO_COUNT);
- EFI_HYPERCALL_PATCH(reset_system,EFI_RESET_SYSTEM);
-
- /* System tables. */
- tables->efi_tables[0].guid = SAL_SYSTEM_TABLE_GUID;
- tables->efi_tables[0].table = FW_FIELD_MPA(sal_systab);
- for (i = 1; i < NUM_EFI_SYS_TABLES; i++) {
- tables->efi_tables[i].guid = NULL_GUID;
- tables->efi_tables[i].table = 0;
- }
- if (xen_ia64_is_dom0(d)) {
- efi_systable_init_dom0(tables);
- } else {
-#ifdef __XEN__
- efi_systable_init_domu(tables);
-#else
- efi_systable_init_domu(d->xch, tables);
-#endif
- }
-
- /* fill in the SAL system table: */
- memcpy(tables->sal_systab.signature, "SST_", 4);
- tables->sal_systab.size = sizeof(tables->sal_systab);
- tables->sal_systab.sal_rev_minor = 1;
- tables->sal_systab.sal_rev_major = 0;
- tables->sal_systab.entry_count = 2;
-
- memcpy((char *)tables->sal_systab.oem_id, "Xen/ia64", 8);
- memcpy((char *)tables->sal_systab.product_id, "Xen/ia64", 8);
-
- /* PAL entry point: */
- tables->sal_ed.type = SAL_DESC_ENTRY_POINT;
- tables->sal_ed.pal_proc = FW_HYPERCALL_PAL_CALL_PADDR;
- dom_fw_pal_hypercall_patch(brkimm, tables->sal_ed.pal_proc,
- hypercalls_imva);
- /* SAL entry point. */
- tables->sal_ed.sal_proc = FW_HYPERCALL_SAL_CALL_PADDR;
- dom_fw_hypercall_patch(brkimm, tables->sal_ed.sal_proc,
- FW_HYPERCALL_SAL_CALL, 1, hypercalls_imva);
- tables->sal_ed.gp = 0; /* will be ignored */
-
- /* Fill an AP wakeup descriptor. */
- tables->sal_wakeup.type = SAL_DESC_AP_WAKEUP;
- tables->sal_wakeup.mechanism = IA64_SAL_AP_EXTERNAL_INT;
- tables->sal_wakeup.vector = XEN_SAL_BOOT_RENDEZ_VEC;
-
- /* Compute checksum. */
- checksum = 0;
- for (cp = (char *)&tables->sal_systab;
- cp < (char *)&tables->fpswa_inf;
- ++cp)
- checksum += *cp;
- tables->sal_systab.checksum = -checksum;
-
- /* SAL return point. */
- dom_fw_hypercall_patch(brkimm, FW_HYPERCALL_SAL_RETURN_PADDR,
- FW_HYPERCALL_SAL_RETURN, 0, hypercalls_imva);
-
- /* Fill in the FPSWA interface: */
- if (!xen_ia64_fpswa_revision(d, &tables->fpswa_inf.revision)) {
- fpswa_supported = 1;
- dom_fpswa_hypercall_patch(brkimm, hypercalls_imva);
- tables->fpswa_inf.fpswa =
- (void *)FW_HYPERCALL_FPSWA_ENTRY_PADDR;
- }
-
- tables->num_mds = 0;
- /* hypercall patches live here, masquerade as reserved PAL memory */
- xen_ia64_efi_make_md(&tables->efi_memmap[tables->num_mds],
- EFI_PAL_CODE, EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
- FW_HYPERCALL_BASE_PADDR, FW_HYPERCALL_END_PADDR);
- tables->num_mds++;
-
-    /* Create dom0/domu md entry for fw and acpi tables area. */
- xen_ia64_efi_make_md(&tables->efi_memmap[tables->num_mds],
- EFI_ACPI_MEMORY_NVS,
- EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
- FW_ACPI_BASE_PADDR, FW_ACPI_END_PADDR);
- tables->num_mds++;
- xen_ia64_efi_make_md(&tables->efi_memmap[tables->num_mds],
- EFI_RUNTIME_SERVICES_DATA,
- EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
- FW_TABLES_BASE_PADDR,
- tables->fw_tables_end_paddr);
- tables->num_mds++;
-
- if (!xen_ia64_is_dom0(d) || xen_ia64_is_running_on_sim(d)) {
- /* DomU (or hp-ski).
-       Create a contiguous memory area. */
- /* kludge: bp->efi_memmap is used to pass memmap_info
- * page's pfn and number of pages to reserve.
- * Currently the following pages must be reserved.
- * memmap info page, start info page, xenstore page
- * and console page.
- * see ia64_setup_memmap() @ xc_dom_boot.c
- */
- num_mds = complete_domu_memmap(d, tables, maxmem,
- XEN_IA64_MEMMAP_INFO_PFN(bp),
- XEN_IA64_MEMMAP_INFO_NUM_PAGES(bp));
- } else {
- /* Dom0.
- We must preserve ACPI data from real machine,
- as well as IO areas. */
- num_mds = complete_dom0_memmap(d, tables);
- }
- if (num_mds < 0)
- return num_mds;
- BUG_ON(num_mds != tables->num_mds);
-
- /* Display memmap. */
- for (i = 0 ; i < tables->num_mds; i++)
-#ifdef __XEN__
- print_md(&tables->efi_memmap[i]);
-#else
- print_md(d->xch, &tables->efi_memmap[i]);
-#endif
-
- /* Fill boot_param */
- bp->efi_systab = FW_FIELD_MPA(efi_systab);
- bp->efi_memmap = FW_FIELD_MPA(efi_memmap);
- bp->efi_memmap_size = tables->num_mds * sizeof(efi_memory_desc_t);
- bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
- bp->efi_memdesc_version = EFI_MEMDESC_VERSION;
- bp->command_line = 0;
- bp->console_info.num_cols = 80;
- bp->console_info.num_rows = 25;
- bp->console_info.orig_x = 0;
- bp->console_info.orig_y = 24;
- if (fpswa_supported)
- bp->fpswa = FW_FIELD_MPA(fpswa_inf);
- return 0;
-}
diff --git a/xen/arch/ia64/xen/dom_fw_dom0.c b/xen/arch/ia64/xen/dom_fw_dom0.c
deleted file mode 100644
index da2dc722ec..0000000000
--- a/xen/arch/ia64/xen/dom_fw_dom0.c
+++ /dev/null
@@ -1,563 +0,0 @@
-/******************************************************************************
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-/*
- * Xen domain firmware emulation support
- * Copyright (C) 2004 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- */
-
-#include <xen/config.h>
-#include <xen/errno.h>
-#include <xen/sched.h>
-#include <xen/list.h>
-#include <xen/acpi.h>
-#include <acpi/actables.h>
-
-#include <asm/dom_fw.h>
-#include <asm/dom_fw_common.h>
-#include <asm/dom_fw_dom0.h>
-#include <asm/dom_fw_utils.h>
-
-#include <linux/sort.h>
-
-struct acpi_backup_table_entry {
- struct list_head list;
- unsigned long pa;
- unsigned long size;
- unsigned char data[0];
-};
-
-static LIST_HEAD(acpi_backup_table_list);
-
-static u32 lsapic_nbr;
-
-/* Modify lsapic table. Provides LPs. */
-static int __init
-acpi_update_lsapic(struct acpi_subtable_header * header, const unsigned long end)
-{
- struct acpi_madt_local_sapic *lsapic =
- container_of(header, struct acpi_madt_local_sapic, header);
- int enable;
-
- if (!header)
- return -EINVAL;
-
- if (lsapic_nbr < dom0->max_vcpus && dom0->vcpu[lsapic_nbr] != NULL)
- enable = 1;
- else
- enable = 0;
-
- if ((lsapic->lapic_flags & ACPI_MADT_ENABLED) && enable) {
- printk("enable lsapic entry: 0x%lx\n", (u64) lsapic);
- lsapic->id = lsapic_nbr;
- lsapic->eid = 0;
- lsapic_nbr++;
- } else if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
- printk("DISABLE lsapic entry: 0x%lx\n", (u64) lsapic);
- lsapic->lapic_flags &= ~ACPI_MADT_ENABLED;
- lsapic->id = 0;
- lsapic->eid = 0;
- }
- return 0;
-}
-
-static int __init
-acpi_patch_plat_int_src(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_madt_interrupt_source *plintsrc =
- container_of(header, struct acpi_madt_interrupt_source,
- header);
-
- if (!header)
- return -EINVAL;
-
- if (plintsrc->type == ACPI_INTERRUPT_CPEI) {
- printk("ACPI_INTERRUPT_CPEI disabled for Domain0\n");
- plintsrc->type = -1;
- }
- return 0;
-}
-
-static int __init
-acpi_update_madt_checksum(struct acpi_table_header *table)
-{
- struct acpi_table_madt *acpi_madt;
-
- if (!table)
- return -EINVAL;
-
- acpi_madt = (struct acpi_table_madt *)table;
- acpi_madt->header.checksum = 0;
- acpi_madt->header.checksum = -acpi_tb_checksum((u8*)acpi_madt,
- table->length);
-
- return 0;
-}
-
-static int __init
-acpi_backup_table(struct acpi_table_header *table)
-{
- struct acpi_backup_table_entry *entry;
-
- entry = xmalloc_bytes(sizeof(*entry) + table->length);
- if (!entry) {
- dprintk(XENLOG_WARNING, "Failed to allocate memory for "
- "%.4s table backup\n", table->signature);
- return -ENOMEM;
- }
-
- entry->pa = __pa(table);
- entry->size = table->length;
-
- memcpy(entry->data, table, table->length);
-
- list_add(&entry->list, &acpi_backup_table_list);
-
- printk(XENLOG_INFO "Backup %.4s table stored @0x%p\n",
- table->signature, entry->data);
-
- return 0;
-}
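-
-/*
- * Sketch (illustration only): the entries above use the pre-C99
- * zero-length-array idiom (data[0]); a C99 flexible-array-member
- * equivalent allocates header and payload in one block, e.g.
- * xmalloc_bytes(sizeof(struct backup_sketch) + size):
- */
-struct backup_sketch {
-        size_t size;
-        unsigned char data[];   /* flexible array member */
-};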
-
-void
-acpi_restore_tables()
-{
- struct acpi_backup_table_entry *entry;
-
- list_for_each_entry(entry, &acpi_backup_table_list, list) {
- printk(XENLOG_INFO "Restoring backup %.4s table @0x%p\n",
- ((struct acpi_table_header *)entry->data)->signature,
- entry->data);
-
- memcpy(__va(entry->pa), entry->data, entry->size);
- /* Only called from kexec path, no need to free entries */
- }
-}
-
-static int __init __acpi_table_disable(struct acpi_table_header *header)
-{
- printk("Disabling ACPI table: %4.4s\n", header->signature);
-
- memcpy(header->oem_id, "xxxxxx", 6);
- memcpy(header->oem_id+1, header->signature, 4);
-	memcpy(header->oem_table_id, "Xen     ", 8);
- memcpy(header->signature, "OEMx", 4);
- header->checksum = 0;
- header->checksum = -acpi_tb_checksum((u8*)header, header->length);
-
- return 0;
-}
-
-static void __init acpi_table_disable(char *id)
-{
- acpi_table_parse(id, __acpi_table_disable);
-}
-
-/* Modify dom0's ACPI tables: fix up the MADT, back up and disable SRAT/SLIT. */
-static void __init touch_acpi_table(void)
-{
- struct acpi_table_header *madt = NULL;
-
- lsapic_nbr = 0;
-
- acpi_get_table(ACPI_SIG_MADT, 0, &madt);
-
- /*
- * Modify dom0 MADT:
- * - Disable CPUs that would exceed max vCPUs for the domain
- * - Virtualize id/eid for indexing into domain vCPU array
- * - Hide CPEI interrupt source
- *
-	 * ACPI tables must be backed up before modification!
- *
- * We update the checksum each time we modify to keep the
- * ACPI CA from warning about invalid checksums.
- */
- acpi_table_parse(ACPI_SIG_MADT, acpi_backup_table);
-
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
- acpi_update_lsapic, 0) < 0)
- printk("Error parsing MADT - no LAPIC entries\n");
-
- acpi_update_madt_checksum(madt);
-
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_SOURCE,
- acpi_patch_plat_int_src, 0) < 0)
- printk("Error parsing MADT - no PLAT_INT_SRC entries\n");
-
- acpi_update_madt_checksum(madt);
-
- /*
- * SRAT & SLIT tables aren't useful for Dom0 until
- * we support more NUMA configuration information in Xen.
- *
- * NB - backup ACPI tables first.
- */
- acpi_table_parse(ACPI_SIG_SRAT, acpi_backup_table);
- acpi_table_parse(ACPI_SIG_SLIT, acpi_backup_table);
-
- acpi_table_disable(ACPI_SIG_SRAT);
- acpi_table_disable(ACPI_SIG_SLIT);
- return;
-}
-
-void __init efi_systable_init_dom0(struct fw_tables *tables)
-{
- int i = 1;
-
- touch_acpi_table();
-
- /* Write messages to the console. */
- printk("Domain0 EFI passthrough:");
- if (efi.mps != EFI_INVALID_TABLE_ADDR) {
- tables->efi_tables[i].guid = MPS_TABLE_GUID;
- tables->efi_tables[i].table = efi.mps;
- printk(" MPS=0x%lx", tables->efi_tables[i].table);
- i++;
- }
- if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) {
- tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
- tables->efi_tables[i].table = efi.acpi20;
- printk(" ACPI 2.0=0x%lx", tables->efi_tables[i].table);
- i++;
- }
- if (efi.acpi != EFI_INVALID_TABLE_ADDR) {
- tables->efi_tables[i].guid = ACPI_TABLE_GUID;
- tables->efi_tables[i].table = efi.acpi;
- printk(" ACPI=0x%lx", tables->efi_tables[i].table);
- i++;
- }
- if (efi.smbios != EFI_INVALID_TABLE_ADDR) {
- tables->efi_tables[i].guid = SMBIOS_TABLE_GUID;
- tables->efi_tables[i].table = efi.smbios;
- printk(" SMBIOS=0x%lx", tables->efi_tables[i].table);
- i++;
- }
- if (efi.hcdp != EFI_INVALID_TABLE_ADDR) {
- tables->efi_tables[i].guid = HCDP_TABLE_GUID;
- tables->efi_tables[i].table = efi.hcdp;
- printk(" HCDP=0x%lx", tables->efi_tables[i].table);
- i++;
- }
- printk("\n");
- BUG_ON(i > NUM_EFI_SYS_TABLES);
-}
-
-static void __init
-setup_dom0_memmap_info(struct domain *d, struct fw_tables *tables)
-{
- int i;
- size_t size;
- unsigned int num_pages;
- efi_memory_desc_t *md;
- efi_memory_desc_t *last_mem_md = NULL;
- xen_ia64_memmap_info_t *memmap_info;
- unsigned long paddr_start;
- unsigned long paddr_end;
-
- size = sizeof(*memmap_info) +
- (tables->num_mds + 1) * sizeof(tables->efi_memmap[0]);
- num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- for (i = tables->num_mds - 1; i >= 0; i--) {
- md = &tables->efi_memmap[i];
- if (md->attribute == EFI_MEMORY_WB &&
- md->type == EFI_CONVENTIONAL_MEMORY &&
- md->num_pages >
- ((num_pages + 1) << (PAGE_SHIFT - EFI_PAGE_SHIFT))) {
- last_mem_md = md;
- break;
- }
- }
-
- if (last_mem_md == NULL) {
- printk("%s: warning: "
- "no dom0 contiguous memory to hold memory map\n",
- __func__);
- return;
- }
- paddr_end = last_mem_md->phys_addr +
- (last_mem_md->num_pages << EFI_PAGE_SHIFT);
- paddr_start = (paddr_end - (num_pages << PAGE_SHIFT)) & PAGE_MASK;
- last_mem_md->num_pages -= (paddr_end - paddr_start) >> EFI_PAGE_SHIFT;
-
- md = &tables->efi_memmap[tables->num_mds];
- tables->num_mds++;
- md->type = EFI_RUNTIME_SERVICES_DATA;
- md->phys_addr = paddr_start;
- md->virt_addr = 0;
- md->num_pages = num_pages << (PAGE_SHIFT - EFI_PAGE_SHIFT);
- md->attribute = EFI_MEMORY_WB;
-
- BUG_ON(tables->fw_tables_size <
- sizeof(*tables) +
- sizeof(tables->efi_memmap[0]) * tables->num_mds);
-	/* after this sort, md no longer points at the new memmap entry */
- sort(tables->efi_memmap, tables->num_mds,
- sizeof(efi_memory_desc_t), efi_mdt_cmp, NULL);
-
- memmap_info = domain_mpa_to_imva(d, paddr_start);
- memmap_info->efi_memdesc_size = sizeof(md[0]);
- memmap_info->efi_memdesc_version = EFI_MEMORY_DESCRIPTOR_VERSION;
- memmap_info->efi_memmap_size = tables->num_mds * sizeof(md[0]);
- dom_fw_copy_to(d,
- paddr_start + offsetof(xen_ia64_memmap_info_t, memdesc),
- &tables->efi_memmap[0], memmap_info->efi_memmap_size);
- d->shared_info->arch.memmap_info_num_pages = num_pages;
- d->shared_info->arch.memmap_info_pfn = paddr_start >> PAGE_SHIFT;
-}
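-
-/*
- * Sketch (illustration only): the shifts above convert between EFI
- * pages (4 KiB, EFI_PAGE_SHIFT == 12) and Xen/ia64 pages (16 KiB,
- * PAGE_SHIFT == 14); four EFI pages make up one Xen page.
- */
-static inline unsigned long
-xen_pages_to_efi_pages_sketch(unsigned long xen_pages)
-{
-        return xen_pages << (14 - 12);  /* PAGE_SHIFT - EFI_PAGE_SHIFT */
-}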
-
-/* setup_guest() @ libxc/xc_linux_build.c arranges memory for domU.
- * However, nothing arranges memory for dom0, so we allocate
- * pages manually here.
- */
-static void
-assign_new_domain0_range(struct domain *d, const efi_memory_desc_t * md)
-{
- if (md->type == EFI_PAL_CODE ||
- md->type == EFI_RUNTIME_SERVICES_DATA ||
- md->type == EFI_CONVENTIONAL_MEMORY) {
- unsigned long start = md->phys_addr & PAGE_MASK;
- unsigned long end =
- md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
- unsigned long addr;
-
- if (end == start) {
-			/* md->num_pages == 0 is allowed. */
- return;
- }
-
- for (addr = start; addr < end; addr += PAGE_SIZE)
- assign_new_domain0_page(d, addr);
- }
-}
-
-/* Complete the dom0 memmap. */
-int __init
-complete_dom0_memmap(struct domain *d, struct fw_tables *tables)
-{
- u64 addr;
- void *efi_map_start, *efi_map_end, *p;
- u64 efi_desc_size;
- int i;
-
- for (i = 0; i < tables->num_mds; i++)
- assign_new_domain0_range(d, &tables->efi_memmap[i]);
-
- /* Walk through all MDT entries.
- Copy all interesting entries. */
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-	/* EFI memory descriptors use 4k pages, while Xen uses 16k pages.
-	 * To avoid the identity mappings for EFI_ACPI_RECLAIM_MEMORY etc.
-	 * being blocked by WB mappings, scan the memory descriptors twice.
-	 * First: set up identity mappings for EFI_ACPI_RECLAIM_MEMORY etc.
-	 * Second: set up mappings for EFI_CONVENTIONAL_MEMORY etc.
-	 */
-
-	/* first scan: set up identity mappings for EFI_ACPI_RECLAIM_MEMORY etc. */
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- const efi_memory_desc_t *md = p;
- efi_memory_desc_t *dom_md = &tables->efi_memmap[tables->num_mds];
- u64 start = md->phys_addr;
- u64 size = md->num_pages << EFI_PAGE_SHIFT;
- u64 end = start + size;
- u64 mpaddr;
- unsigned long flags;
-
- switch (md->type) {
- case EFI_RUNTIME_SERVICES_CODE:
- case EFI_RUNTIME_SERVICES_DATA:
- case EFI_ACPI_RECLAIM_MEMORY:
- case EFI_ACPI_MEMORY_NVS:
- case EFI_RESERVED_TYPE:
- /*
- * Map into dom0 - We must respect protection
- * and cache attributes. Not all of these pages
- * are writable!!!
- */
- flags = ASSIGN_writable; /* dummy - zero */
- if (md->attribute & EFI_MEMORY_WP)
- flags |= ASSIGN_readonly;
- if ((md->attribute & EFI_MEMORY_UC) &&
- !(md->attribute & EFI_MEMORY_WB))
- flags |= ASSIGN_nocache;
-
- assign_domain_mach_page(d, start, size, flags);
-
- /* Fall-through. */
- case EFI_MEMORY_MAPPED_IO:
- /* Will be mapped with ioremap. */
- /* Copy descriptor. */
- *dom_md = *md;
- dom_md->virt_addr = 0;
- tables->num_mds++;
- break;
-
- case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
- flags = ASSIGN_writable; /* dummy - zero */
- if (md->attribute & EFI_MEMORY_UC)
- flags |= ASSIGN_nocache;
-
- if (start > 0x1ffffffff0000000UL) {
- mpaddr = 0x4000000000000UL - size;
- printk(XENLOG_INFO "Remapping IO ports from "
- "%lx to %lx\n", start, mpaddr);
- } else
- mpaddr = start;
-
- /* Map into dom0. */
- assign_domain_mmio_page(d, mpaddr, start, size, flags);
- /* Copy descriptor. */
- *dom_md = *md;
- dom_md->phys_addr = mpaddr;
- dom_md->virt_addr = 0;
- tables->num_mds++;
- break;
-
- case EFI_CONVENTIONAL_MEMORY:
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_BOOT_SERVICES_CODE:
- case EFI_BOOT_SERVICES_DATA:
- break;
-
- case EFI_UNUSABLE_MEMORY:
- case EFI_PAL_CODE:
- /*
- * We don't really need these, but holes in the
- * memory map may cause Linux to assume there are
- * uncacheable ranges within a granule.
- */
- dom_md->type = EFI_UNUSABLE_MEMORY;
- dom_md->phys_addr = start;
- dom_md->virt_addr = 0;
- dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
- dom_md->attribute = EFI_MEMORY_WB;
- tables->num_mds++;
- break;
-
- default:
- /* Print a warning but continue. */
- printk("complete_dom0_memmap: warning: "
- "unhandled MDT entry type %u\n", md->type);
- }
- }
-
-	/* second scan: set up mappings for EFI_CONVENTIONAL_MEMORY etc. */
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- const efi_memory_desc_t *md = p;
- efi_memory_desc_t *dom_md = &tables->efi_memmap[tables->num_mds];
- u64 start = md->phys_addr;
- u64 size = md->num_pages << EFI_PAGE_SHIFT;
- u64 end = start + size;
-
- switch (md->type) {
-
- case EFI_CONVENTIONAL_MEMORY:
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_BOOT_SERVICES_CODE:
- case EFI_BOOT_SERVICES_DATA: {
- u64 dom_md_start;
- u64 dom_md_end;
- unsigned long left_mem =
- (unsigned long)(d->max_pages - d->tot_pages) <<
- PAGE_SHIFT;
-
- if (!(md->attribute & EFI_MEMORY_WB))
- break;
-
- dom_md_start = max(tables->fw_end_paddr, start);
- dom_md_end = dom_md_start;
- do {
- dom_md_end = min(dom_md_end + left_mem, end);
- if (dom_md_end < dom_md_start + PAGE_SIZE)
- break;
-
- dom_md->type = EFI_CONVENTIONAL_MEMORY;
- dom_md->phys_addr = dom_md_start;
- dom_md->virt_addr = 0;
- dom_md->num_pages =
- (dom_md_end - dom_md_start) >>
- EFI_PAGE_SHIFT;
- dom_md->attribute = EFI_MEMORY_WB;
-
- assign_new_domain0_range(d, dom_md);
-				/*
-				 * Recalculate left_mem: we might have
-				 * already allocated memory in this region
-				 * for the kernel loader, so we may have
-				 * consumed less than
-				 * (dom_md_end - dom_md_start) above.
-				 */
- left_mem = (unsigned long)
- (d->max_pages - d->tot_pages) <<
- PAGE_SHIFT;
- } while (left_mem > 0 && dom_md_end < end);
-
- if (!(dom_md_end < dom_md_start + PAGE_SIZE))
- tables->num_mds++;
- break;
- }
-
-
- default:
- break;
- }
- }
-
- BUG_ON(tables->fw_tables_size <
- sizeof(*tables) +
- sizeof(tables->efi_memmap[0]) * tables->num_mds);
-
- sort(tables->efi_memmap, tables->num_mds, sizeof(efi_memory_desc_t),
- efi_mdt_cmp, NULL);
-
- // Map low-memory holes & unmapped MMIO for legacy drivers
- for (addr = 0; addr < ONE_MB; addr += PAGE_SIZE) {
- if (domain_page_mapped(d, addr))
- continue;
-
- if (efi_mmio(addr, PAGE_SIZE)) {
- unsigned long flags;
- flags = ASSIGN_writable | ASSIGN_nocache;
- assign_domain_mmio_page(d, addr, addr, PAGE_SIZE,
- flags);
- }
- }
- setup_dom0_memmap_info(d, tables);
- return tables->num_mds;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "linux"
- * c-basic-offset: 8
- * tab-width: 8
- * indent-tabs-mode: t
- * End:
- */
diff --git a/xen/arch/ia64/xen/dom_fw_domu.c b/xen/arch/ia64/xen/dom_fw_domu.c
deleted file mode 100644
index ecf86b642b..0000000000
--- a/xen/arch/ia64/xen/dom_fw_domu.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/******************************************************************************
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-/*
- * Xen domain firmware emulation support
- * Copyright (C) 2004 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- */
-
-#ifdef __XEN__
-#include <xen/sched.h>
-#include <asm/dom_fw_utils.h>
-#include <linux/sort.h>
-#define xen_ia64_dom_fw_map(d, mpaddr) domain_mpa_to_imva((d), (mpaddr))
-#define xen_ia64_dom_fw_unmap(d, vaddr) do { } while (0)
-#else
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <errno.h>
-#include <inttypes.h>
-
-#include <xen/xen.h>
-
-#include "xg_private.h"
-#include "xc_dom.h"
-#include "ia64/xc_dom_ia64_util.h"
-#endif
-
-#include <asm/dom_fw.h>
-#include <asm/dom_fw_domu.h>
-
-#ifdef __XEN__
-void efi_systable_init_domu(struct fw_tables *tables)
-#else
-void efi_systable_init_domu(xc_interface *xch, struct fw_tables *tables)
-#endif
-{
- int i = 1;
-
- printk(XENLOG_GUEST XENLOG_INFO "DomainU EFI build up:");
-
- tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
- tables->efi_tables[i].table = FW_ACPI_BASE_PADDR;
- printk(" ACPI 2.0=0x%lx", tables->efi_tables[i].table);
- i++;
- printk("\n");
- BUG_ON(i > NUM_EFI_SYS_TABLES);
-}
-
-int
-complete_domu_memmap(domain_t * d,
- struct fw_tables *tables,
- unsigned long maxmem,
- unsigned long memmap_info_pfn,
- unsigned long memmap_info_num_pages)
-{
- efi_memory_desc_t *md;
- int create_memmap = 0;
- xen_ia64_memmap_info_t *memmap_info;
- unsigned long memmap_info_size;
- unsigned long paddr_start;
- unsigned long paddr_end;
- void *p;
- void *memmap_start;
- void *memmap_end;
-#ifndef __XEN__
- xc_interface *xch = d->xch;
-#endif
-
- if (memmap_info_pfn == 0 || memmap_info_num_pages == 0) {
-		/* Old domain builder that doesn't set up a memory
-		 * map; create one for compatibility. */
- memmap_info_pfn = (maxmem >> PAGE_SHIFT) - 1;
- memmap_info_num_pages = 1;
- create_memmap = 1;
- }
-
- memmap_info_size = memmap_info_num_pages << PAGE_SHIFT;
- paddr_start = memmap_info_pfn << PAGE_SHIFT;
- /* 3 = start info page, xenstore page and console page */
- paddr_end = paddr_start + memmap_info_size + 3 * PAGE_SIZE;
- memmap_info = xen_ia64_dom_fw_map(d, paddr_start);
-
- if (memmap_info->efi_memmap_size == 0) {
- create_memmap = 1;
- } else if (memmap_info->efi_memdesc_size != sizeof(md[0]) ||
- memmap_info->efi_memdesc_version !=
- EFI_MEMORY_DESCRIPTOR_VERSION) {
- printk(XENLOG_WARNING
- "%s: Warning: unknown memory map "
- "memmap size %" PRIu64 " "
- "memdesc size %" PRIu64 " "
- "version %" PRIu32 "\n",
- __func__,
- memmap_info->efi_memmap_size,
- memmap_info->efi_memdesc_size,
- memmap_info->efi_memdesc_version);
- create_memmap = 1;
- } else if (memmap_info_size < memmap_info->efi_memmap_size) {
- printk(XENLOG_WARNING
- "%s: Warning: too short memmap info size %" PRIu64 "\n",
- __func__, memmap_info_size);
- xen_ia64_dom_fw_unmap(d, memmap_info);
- return -EINVAL;
- } else if (memmap_info->efi_memmap_size >
- PAGE_SIZE - sizeof(*memmap_info)) {
-		/*
-		 * Currently a memmap spanning more than a single page
-		 * isn't supported.
-		 */
- printk(XENLOG_WARNING
- "%s: Warning: too large efi_memmap_size %" PRIu64 "\n",
- __func__, memmap_info->efi_memmap_size);
- xen_ia64_dom_fw_unmap(d, memmap_info);
- return -ENOSYS;
- }
-
- if (create_memmap) {
-		/*
-		 * Old domain builder that doesn't set up a memory map;
-		 * create one for compatibility.
-		 */
- memmap_info->efi_memdesc_size = sizeof(md[0]);
- memmap_info->efi_memdesc_version =
- EFI_MEMORY_DESCRIPTOR_VERSION;
- memmap_info->efi_memmap_size = 1 * sizeof(md[0]);
-
-		md = (efi_memory_desc_t *)&memmap_info->memdesc;
- md->type = EFI_CONVENTIONAL_MEMORY;
- md->pad = 0;
- md->phys_addr = 0;
- md->virt_addr = 0;
- md->num_pages = maxmem >> EFI_PAGE_SHIFT;
- md->attribute = EFI_MEMORY_WB;
- }
-
- memmap_start = &memmap_info->memdesc;
- memmap_end = memmap_start + memmap_info->efi_memmap_size;
-
- /* XXX Currently the table must be in a single page. */
- if ((unsigned long)memmap_end > (unsigned long)memmap_info + PAGE_SIZE) {
- xen_ia64_dom_fw_unmap(d, memmap_info);
- return -EINVAL;
- }
-
-	/* Sort it before use.
-	 * XXX: this is created by the user-space domain builder,
-	 * so we should check its integrity. */
- sort(&memmap_info->memdesc,
- memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size,
- memmap_info->efi_memdesc_size, efi_mdt_cmp, NULL);
-
- for (p = memmap_start; p < memmap_end;
- p += memmap_info->efi_memdesc_size) {
- unsigned long start;
- unsigned long end;
-
- md = p;
- start = md->phys_addr;
- end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
- if (start < tables->fw_end_paddr)
- start = tables->fw_end_paddr;
- if (end <= start)
- continue;
-
- /* exclude [paddr_start, paddr_end) */
- if (paddr_end <= start || end <= paddr_start) {
- xen_ia64_efi_make_md(&tables->
- efi_memmap[tables->num_mds],
- EFI_CONVENTIONAL_MEMORY,
- EFI_MEMORY_WB, start, end);
- tables->num_mds++;
- } else if (paddr_start <= start && paddr_end < end) {
- xen_ia64_efi_make_md(&tables->
- efi_memmap[tables->num_mds],
- EFI_CONVENTIONAL_MEMORY,
- EFI_MEMORY_WB, paddr_end, end);
- tables->num_mds++;
- } else if (start < paddr_start && end <= paddr_end) {
- xen_ia64_efi_make_md(&tables->
- efi_memmap[tables->num_mds],
- EFI_CONVENTIONAL_MEMORY,
- EFI_MEMORY_WB, start, paddr_start);
- tables->num_mds++;
- } else {
- xen_ia64_efi_make_md(&tables->
- efi_memmap[tables->num_mds],
- EFI_CONVENTIONAL_MEMORY,
- EFI_MEMORY_WB, start, paddr_start);
- tables->num_mds++;
- xen_ia64_efi_make_md(&tables->
- efi_memmap[tables->num_mds],
- EFI_CONVENTIONAL_MEMORY,
- EFI_MEMORY_WB, paddr_end, end);
- tables->num_mds++;
- }
- }
-
- /* memmap info page. */
- xen_ia64_efi_make_md(&tables->efi_memmap[tables->num_mds],
- EFI_RUNTIME_SERVICES_DATA, EFI_MEMORY_WB,
- paddr_start, paddr_end);
- tables->num_mds++;
-
- /* Create an entry for IO ports. */
- xen_ia64_efi_make_md(&tables->efi_memmap[tables->num_mds],
- EFI_MEMORY_MAPPED_IO_PORT_SPACE, EFI_MEMORY_UC,
- IO_PORTS_PADDR, IO_PORTS_PADDR + IO_PORTS_SIZE);
- tables->num_mds++;
-
- sort(tables->efi_memmap, tables->num_mds, sizeof(efi_memory_desc_t),
- efi_mdt_cmp, NULL);
-
- xen_ia64_dom_fw_unmap(d, memmap_info);
- return tables->num_mds;
-}
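-
-/*
- * Sketch (illustration only) of the four-way split above: excluding a
- * reserved interval [rs, re) from a memory range [s, e) leaves zero,
- * one, or two conventional-memory pieces.
- */
-static void
-exclude_range_sketch(unsigned long s, unsigned long e,
-                     unsigned long rs, unsigned long re,
-                     void (*emit)(unsigned long, unsigned long))
-{
-        if (re <= s || e <= rs) {       /* disjoint: keep whole range */
-                emit(s, e);
-                return;
-        }
-        if (s < rs)                     /* piece below the reservation */
-                emit(s, rs);
-        if (re < e)                     /* piece above the reservation */
-                emit(re, e);
-}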
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "linux"
- * c-basic-offset: 8
- * tab-width: 8
- * indent-tabs-mode: t
- * End:
- */
diff --git a/xen/arch/ia64/xen/dom_fw_sn2.c b/xen/arch/ia64/xen/dom_fw_sn2.c
deleted file mode 100644
index 93da9b6a22..0000000000
--- a/xen/arch/ia64/xen/dom_fw_sn2.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Xen domain0 platform firmware fixups for sn2
- * Copyright (C) 2007 Silicon Graphics Inc.
- * Jes Sorensen <jes@sgi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <xen/config.h>
-#include <xen/acpi.h>
-#include <xen/errno.h>
-#include <xen/sched.h>
-#include <xen/nodemask.h>
-
-#include <asm/dom_fw.h>
-#include <asm/dom_fw_common.h>
-#include <asm/dom_fw_dom0.h>
-#include <asm/dom_fw_utils.h>
-
-#include <asm/sn/arch.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/shub_mmr.h>
-
-#define SWAP_NASID(n, x) ((x & ~NASID_MASK) | NASID_SPACE(n))
-
-int __init
-sn2_dom_fw_init(domain_t *d,
- struct xen_ia64_boot_param *bp,
- struct fw_tables *tables)
-{
- int node;
- short nasid;
- unsigned long shubid, shubpicam, shubpiowrite;
-
- printk("SN2 mapping specific registers to dom0\n");
-
- assign_domain_mach_page(d, LOCAL_MMR_OFFSET | SH_RTC, PAGE_SIZE,
- ASSIGN_nocache);
-
- if (is_shub1()) {
- /* 0x110060000 */
- shubid = SH1_GLOBAL_MMR_OFFSET + (SH1_SHUB_ID & PAGE_MASK);
- /* 0x120050000 */
- shubpicam = SH1_GLOBAL_MMR_OFFSET +
- (SH1_PI_CAM_CONTROL & PAGE_MASK);
- /* 0x120070000 */
- shubpiowrite = SH1_GLOBAL_MMR_OFFSET +
- (SH1_PIO_WRITE_STATUS_0 & PAGE_MASK);
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- shubid = SWAP_NASID(nasid, shubid);
- shubpicam = SWAP_NASID(nasid, shubpicam);
- shubpiowrite = SWAP_NASID(nasid, shubpiowrite);
-
- assign_domain_mach_page(d, shubid, PAGE_SIZE,
- ASSIGN_nocache);
- assign_domain_mach_page(d, shubpicam, PAGE_SIZE,
- ASSIGN_nocache);
- assign_domain_mach_page(d, shubpiowrite, PAGE_SIZE,
- ASSIGN_nocache);
- }
-
- /* map leds */
- assign_domain_mach_page(d, LOCAL_MMR_OFFSET |
- SH1_REAL_JUNK_BUS_LED0,
- PAGE_SIZE, ASSIGN_nocache);
- assign_domain_mach_page(d, LOCAL_MMR_OFFSET |
- SH1_REAL_JUNK_BUS_LED1,
- PAGE_SIZE, ASSIGN_nocache);
- assign_domain_mach_page(d, LOCAL_MMR_OFFSET |
- SH1_REAL_JUNK_BUS_LED2,
- PAGE_SIZE, ASSIGN_nocache);
- assign_domain_mach_page(d, LOCAL_MMR_OFFSET |
- SH1_REAL_JUNK_BUS_LED3,
- PAGE_SIZE, ASSIGN_nocache);
- } else
- panic("Unable to build EFI entry for SHUB 2 MMR\n");
-
- return 0;
-}
diff --git a/xen/arch/ia64/xen/dom_fw_utils.c b/xen/arch/ia64/xen/dom_fw_utils.c
deleted file mode 100644
index 533ecb3227..0000000000
--- a/xen/arch/ia64/xen/dom_fw_utils.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/******************************************************************************
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <xen/types.h>
-#include <xen/version.h>
-#include <xen/errno.h>
-#include <xen/sched.h>
-
-#include <asm/fpswa.h>
-#include <asm/dom_fw.h>
-#include <asm/dom_fw_common.h>
-#include <asm/dom_fw_utils.h>
-
-#include <linux/sort.h>
-
-uint32_t xen_ia64_version(struct domain *unused)
-{
- return (xen_major_version() << 16) | xen_minor_version();
-}
-
-int xen_ia64_fpswa_revision(struct domain *d, unsigned int *revision)
-{
- if (fpswa_interface == NULL)
- return -ENOSYS;
-
- *revision = fpswa_interface->revision;
- return 0;
-}
-
-int xen_ia64_is_vcpu_allocated(struct domain *d, uint32_t vcpu)
-{
- return d->vcpu[vcpu] != NULL;
-}
-
-int xen_ia64_is_running_on_sim(struct domain *unused)
-{
- return running_on_sim;
-}
-
-int xen_ia64_is_dom0(struct domain *d)
-{
- return d == dom0;
-}
-
-void xen_ia64_set_convmem_end(struct domain *d, uint64_t convmem_end)
-{
- d->arch.convmem_end = convmem_end;
-}
-
-static void dom_fw_domain_init(struct domain *d, struct fw_tables *tables)
-{
- /* Initialise for EFI_SET_VIRTUAL_ADDRESS_MAP emulation */
- d->arch.efi_runtime = &tables->efi_runtime;
- d->arch.fpswa_inf = &tables->fpswa_inf;
- d->arch.sal_data = &tables->sal_data;
-}
-
-static int dom_fw_set_convmem_end(struct domain *d)
-{
- unsigned long gpaddr;
- size_t size;
- xen_ia64_memmap_info_t *memmap_info;
- efi_memory_desc_t *md;
- void *p;
- void *memmap_start;
- void *memmap_end;
-
- if (d->shared_info->arch.memmap_info_pfn == 0)
- return -EINVAL;
-
- gpaddr = d->shared_info->arch.memmap_info_pfn << PAGE_SHIFT;
- size = d->shared_info->arch.memmap_info_num_pages << PAGE_SHIFT;
- memmap_info = _xmalloc(size, __alignof__(*memmap_info));
- if (memmap_info == NULL)
- return -ENOMEM;
- dom_fw_copy_from(memmap_info, d, gpaddr, size);
- if (memmap_info->efi_memmap_size == 0 ||
- memmap_info->efi_memdesc_size != sizeof(*md) ||
- memmap_info->efi_memdesc_version != EFI_MEMORY_DESCRIPTOR_VERSION ||
- sizeof(*memmap_info) + memmap_info->efi_memmap_size > size ||
- memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size == 0) {
- xfree(memmap_info);
- return -EINVAL;
- }
-
- memmap_start = &memmap_info->memdesc;
- memmap_end = memmap_start + memmap_info->efi_memmap_size;
-
-	/* Sort it before use.
-	 * XXX: this is created by the user-space domain builder,
-	 * so we should check its integrity. */
- sort(&memmap_info->memdesc,
- memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size,
- memmap_info->efi_memdesc_size, efi_mdt_cmp, NULL);
-
- if (d->arch.convmem_end == 0)
- xen_ia64_set_convmem_end(d, d->max_pages << PAGE_SHIFT);
-
- for (p = memmap_start; p < memmap_end;
- p += memmap_info->efi_memdesc_size) {
- unsigned long end;
-
- md = p;
- end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
- if (md->attribute == EFI_MEMORY_WB &&
- md->type == EFI_CONVENTIONAL_MEMORY &&
- md->num_pages > 0 && d->arch.convmem_end < end)
- xen_ia64_set_convmem_end(d, end);
- }
-
- dom_fw_copy_to(d, gpaddr, memmap_info, size);
- xfree(memmap_info);
- return 0;
-}
-
-/* Allocate a page for fw.
- * setup_guest() @ libxc/xc_linux_build.c does this for domU.
- */
-static inline void
-assign_new_domain_page_if_dom0(struct domain *d, unsigned long mpaddr)
-{
- if (d == dom0)
- assign_new_domain0_page(d, mpaddr);
-}
-
-static void dom_fw_setup_for_domain_restore(domain_t * d, unsigned long maxmem)
-{
- assign_new_domain_page(d, FW_HYPERCALL_BASE_PADDR);
- dom_fw_domain_init(d, domain_mpa_to_imva(d, FW_TABLES_BASE_PADDR));
- xen_ia64_set_convmem_end(d, maxmem);
-}
-
-/* copy memory range to domain pseudo physical address space */
-void
-dom_fw_copy_to(struct domain *d, unsigned long dest_gpaddr,
- void *src, size_t size)
-{
- while (size > 0) {
- unsigned long page_offset = dest_gpaddr & ~PAGE_MASK;
- size_t copy_size = size;
- void *dest;
-
- if (page_offset + copy_size > PAGE_SIZE)
- copy_size = PAGE_SIZE - page_offset;
- dest = domain_mpa_to_imva(d, dest_gpaddr);
- memcpy(dest, src, copy_size);
-
- src += copy_size;
- dest_gpaddr += copy_size;
- size -= copy_size;
- }
-}
-
-/* copy memory range from domain pseudo physical address space */
-void
-dom_fw_copy_from(void *dest, struct domain *d, unsigned long src_gpaddr,
- size_t size)
-{
- while (size > 0) {
- unsigned long page_offset = src_gpaddr & ~PAGE_MASK;
- size_t copy_size = size;
- void *src;
-
- if (page_offset + copy_size > PAGE_SIZE)
- copy_size = PAGE_SIZE - page_offset;
- src = domain_mpa_to_imva(d, src_gpaddr);
- memcpy(dest, src, copy_size);
-
- dest += copy_size;
- src_gpaddr += copy_size;
- size -= copy_size;
- }
-}
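-
-/*
- * Worked example (illustration only, PAGE_SIZE == 16 KiB): copying
- * 0x5000 bytes to gpaddr 0x3000 with the loops above is split into
- * two chunks - 0x1000 bytes up to the 0x4000 page boundary, then the
- * remaining 0x4000 bytes from the start of the next page.
- */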
-
-int dom_fw_setup(domain_t * d, unsigned long bp_mpa, unsigned long maxmem)
-{
- int old_domu_builder = 0;
- struct xen_ia64_boot_param *bp;
-
- BUILD_BUG_ON(sizeof(struct fw_tables) >
- (FW_TABLES_END_PADDR_MIN - FW_TABLES_BASE_PADDR));
-
- if (bp_mpa == 0) {
- /* bp_mpa == 0 means this is domain restore case. */
- dom_fw_setup_for_domain_restore(d, maxmem);
- return 0;
- }
-
- /* Create page for boot_param. */
- assign_new_domain_page_if_dom0(d, bp_mpa);
- bp = domain_mpa_to_imva(d, bp_mpa);
- if (d != dom0) {
- /*
- * XXX kludge.
- * When XEN_DOMCTL_arch_setup is called, shared_info can't
- * be accessed by libxc, so memmap_info_pfn isn't
- * initialized. But dom_fw_set_convmem_end() requires it,
- * so we initialize it here.
- * Note: the domain builder may overwrite memmap_info_num_pages
- * and memmap_info_pfn later.
- */
- if (bp->efi_memmap_size == 0 ||
- XEN_IA64_MEMMAP_INFO_NUM_PAGES(bp) == 0 ||
- XEN_IA64_MEMMAP_INFO_PFN(bp) == 0) {
- /* old domain builder compatibility */
- d->shared_info->arch.memmap_info_num_pages = 1;
- d->shared_info->arch.memmap_info_pfn =
- (maxmem >> PAGE_SHIFT) - 1;
- old_domu_builder = 1;
- } else {
- d->shared_info->arch.memmap_info_num_pages =
- XEN_IA64_MEMMAP_INFO_NUM_PAGES(bp);
- d->shared_info->arch.memmap_info_pfn =
- XEN_IA64_MEMMAP_INFO_PFN(bp);
- /* currently a multi-page memmap isn't supported */
- if (d->shared_info->arch.memmap_info_num_pages != 1)
- return -ENOSYS;
- }
- }
-
- /* Create page for acpi tables. */
- if (d != dom0 && old_domu_builder) {
- struct fake_acpi_tables *imva;
- imva = domain_mpa_to_imva(d, FW_ACPI_BASE_PADDR);
- dom_fw_fake_acpi(d, imva);
- }
- if (d == dom0 || old_domu_builder) {
- int ret;
- unsigned long imva_hypercall_base;
- size_t fw_tables_size;
- struct fw_tables *fw_tables;
- unsigned long gpaddr;
-
- /* Create page for hypercalls. */
- assign_new_domain_page_if_dom0(d, FW_HYPERCALL_BASE_PADDR);
- imva_hypercall_base = (unsigned long)domain_mpa_to_imva
- (d, FW_HYPERCALL_BASE_PADDR);
-
- /*
- * dom_fw_init()
- * - [FW_HYPERCALL_BASE_PADDR, FW_HYPERCALL_END_PADDR)
- * - [FW_ACPI_BASE_PADDR, FW_ACPI_END_PADDR)
- * - [FW_TABLES_BASE_PADDR, tables->fw_tables_end_paddr)
- *
- * complete_dom0_memmap() for dom0
- * - real machine memory map
- * - memmap_info by setup_dom0_memmap_info()
- *
- * complete_domu_memmap() for old domu builder
- * - I/O port
- * - conventional memory
- * - memmap_info
- */
-#define NUM_EXTRA_MEM_DESCS 4
-
- /* Estimate necessary efi memmap size and allocate memory */
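- /* (the host's descriptor count plus NUM_EXTRA_MEM_DESCS of slack for
- * the regions listed above, clamped to the minimum table size, then
- * rounded up to the EFI page size) */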
- fw_tables_size = sizeof(*fw_tables) +
- (ia64_boot_param->efi_memmap_size /
- ia64_boot_param->efi_memdesc_size +
- NUM_EXTRA_MEM_DESCS) *
- sizeof(fw_tables->efi_memmap[0]);
- if (fw_tables_size <
- FW_TABLES_END_PADDR_MIN - FW_TABLES_BASE_PADDR)
- fw_tables_size =
- FW_TABLES_END_PADDR_MIN - FW_TABLES_BASE_PADDR;
- fw_tables_size = (fw_tables_size + ((1UL << EFI_PAGE_SHIFT) - 1))
- & ~((1UL << EFI_PAGE_SHIFT) - 1);
- fw_tables =
- (struct fw_tables *)_xmalloc(fw_tables_size,
- __alignof__(*fw_tables));
- if (fw_tables == NULL) {
- dprintk(XENLOG_INFO,
- "can't allocate fw_tables memory size = %ld\n",
- fw_tables_size);
- return -ENOMEM;
- }
- memset(fw_tables, 0, fw_tables_size);
- BUILD_BUG_ON(FW_END_PADDR_MIN != FW_TABLES_END_PADDR_MIN);
- fw_tables->fw_tables_size = fw_tables_size;
- fw_tables->fw_end_paddr = FW_TABLES_BASE_PADDR + fw_tables_size;
- fw_tables->fw_tables_end_paddr =
- FW_TABLES_BASE_PADDR + fw_tables_size;
- fw_tables->num_mds = 0;
-
- /* It is necessary to allocate pages before dom_fw_init():
- * dom_fw_init() uses pages up to d->max_pages.
- */
- for (gpaddr = FW_TABLES_BASE_PADDR;
- gpaddr < fw_tables->fw_end_paddr; gpaddr += PAGE_SIZE)
- assign_new_domain_page_if_dom0(d, gpaddr);
-
- ret = dom_fw_init(d, d->arch.breakimm, bp,
- fw_tables, imva_hypercall_base, maxmem);
- if (ret < 0) {
- xfree(fw_tables);
- return ret;
- }
-
- ret = platform_fw_init(d, bp, fw_tables);
- if (ret < 0) {
- xfree(fw_tables);
- return ret;
- }
-
- if (sizeof(*fw_tables) +
- fw_tables->num_mds * sizeof(fw_tables->efi_memmap[0]) >
- fw_tables_size) {
- panic("EFI memmap too large. "
- "Increase NUM_EXTRA_MEM_DESCS.\n"
- "fw_table_size %ld > %ld num_mds %ld "
- "NUM_EXTRA_MEM_DESCS %d.\n",
- fw_tables_size, fw_tables->fw_tables_size,
- fw_tables->num_mds, NUM_EXTRA_MEM_DESCS);
- }
- fw_tables_size = sizeof(*fw_tables) +
- fw_tables->num_mds * sizeof(fw_tables->efi_memmap[0]);
-
- /* clear domain builder internal use member */
- fw_tables->fw_tables_size = 0;
- fw_tables->fw_end_paddr = 0;
- fw_tables->fw_tables_end_paddr = 0;
- fw_tables->num_mds = 0;
-
- /* copy fw_tables into domain pseudo physical address space */
- dom_fw_copy_to(d, FW_TABLES_BASE_PADDR, fw_tables,
- fw_tables_size);
- xfree(fw_tables);
- }
-
- dom_fw_domain_init(d, domain_mpa_to_imva(d, FW_TABLES_BASE_PADDR));
- return dom_fw_set_convmem_end(d);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "linux"
- * c-basic-offset: 8
- * tab-width: 8
- * indent-tabs-mode: t
- * End:
- */
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
deleted file mode 100644
index 7faa631b7a..0000000000
--- a/xen/arch/ia64/xen/domain.c
+++ /dev/null
@@ -1,2488 +0,0 @@
-/*
- * Copyright (C) 1995 Linus Torvalds
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * Copyright (C) 2005 Intel Co
- * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
- *
- * 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@intel.com> Add VTI domain support
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * dom0 vp model support
- */
-
-#include <xen/config.h>
-#include <xen/init.h>
-#include <xen/lib.h>
-#include <xen/errno.h>
-#include <xen/sched.h>
-#include <xen/smp.h>
-#include <xen/delay.h>
-#include <xen/softirq.h>
-#include <xen/mm.h>
-#include <xen/grant_table.h>
-#include <xen/iocap.h>
-#include <asm/asm-xsi-offsets.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <xen/event.h>
-#include <xen/console.h>
-#include <xen/hypercall.h>
-#include <xen/version.h>
-#include <xen/libelf.h>
-#include <asm/pgalloc.h>
-#include <asm/offsets.h> /* for IA64_THREAD_INFO_SIZE */
-#include <asm/vcpu.h> /* for function declarations */
-#include <public/xen.h>
-#include <xen/domain.h>
-#include <asm/vmx.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/vmx_vpd.h>
-#include <asm/vmx_phy_mode.h>
-#include <asm/vmx_vcpu_save.h>
-#include <asm/vhpt.h>
-#include <asm/vcpu.h>
-#include <asm/tlbflush.h>
-#include <asm/regionreg.h>
-#include <asm/dom_fw.h>
-#include <asm/shadow.h>
-#include <xen/guest_access.h>
-#include <asm/tlb_track.h>
-#include <asm/perfmon.h>
-#include <asm/sal.h>
-#include <public/vcpu.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-#include <asm/debugger.h>
-
-/* dom0_size: default memory allocation for dom0 (~4GB) */
-static unsigned long __initdata dom0_size = 4096UL*1024UL*1024UL;
-
-/* dom0_max_vcpus: maximum number of VCPUs to create for dom0. */
-static unsigned int __initdata dom0_max_vcpus = 4;
-integer_param("dom0_max_vcpus", dom0_max_vcpus);
-
-extern char dom0_command_line[];
-
-/* forward declaration */
-static void init_switch_stack(struct vcpu *v);
-
-/* Address of vpsr.i (in fact evtchn_upcall_mask) of current vcpu.
- This is a Xen virtual address. */
-DEFINE_PER_CPU(uint8_t *, current_psr_i_addr);
-DEFINE_PER_CPU(int *, current_psr_ic_addr);
-
-DEFINE_PER_CPU(struct vcpu *, fp_owner);
-
-#include <xen/sched-if.h>
-
-static void flush_vtlb_for_context_switch(struct vcpu* prev, struct vcpu* next)
-{
- int cpu = smp_processor_id();
- int last_vcpu_id, last_processor;
-
- if (!is_idle_domain(prev->domain))
- tlbflush_update_time
- (&prev->domain->arch.last_vcpu[cpu].tlbflush_timestamp,
- tlbflush_current_time());
-
- if (is_idle_domain(next->domain))
- return;
-
- last_vcpu_id = next->domain->arch.last_vcpu[cpu].vcpu_id;
- last_processor = next->arch.last_processor;
-
- next->domain->arch.last_vcpu[cpu].vcpu_id = next->vcpu_id;
- next->arch.last_processor = cpu;
-
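- /* A flush may be needed either when a different vcpu of this domain
- * last ran on this pcpu, or when this vcpu last ran on another pcpu:
- * in both cases stale translations could survive in the VHPT or TLB. */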
- if ((last_vcpu_id != next->vcpu_id &&
- last_vcpu_id != INVALID_VCPU_ID) ||
- (last_vcpu_id == next->vcpu_id &&
- last_processor != cpu &&
- last_processor != INVALID_PROCESSOR)) {
-#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
- u32 last_tlbflush_timestamp =
- next->domain->arch.last_vcpu[cpu].tlbflush_timestamp;
-#endif
- int vhpt_is_flushed = 0;
-
- // if the vTLB implementation is changed,
- // the following must be updated as well.
- if (VMX_DOMAIN(next)) {
- // currently the vTLB for a vt-i domain is per vcpu,
- // so no flushing is needed.
- } else if (HAS_PERVCPU_VHPT(next->domain)) {
- // nothing to do
- } else {
- if (NEED_FLUSH(__get_cpu_var(vhpt_tlbflush_timestamp),
- last_tlbflush_timestamp)) {
- local_vhpt_flush();
- vhpt_is_flushed = 1;
- }
- }
- if (vhpt_is_flushed || NEED_FLUSH(__get_cpu_var(tlbflush_time),
- last_tlbflush_timestamp)) {
- local_flush_tlb_all();
- perfc_incr(tlbflush_clock_cswitch_purge);
- } else {
- perfc_incr(tlbflush_clock_cswitch_skip);
- }
- perfc_incr(flush_vtlb_for_context_switch);
- }
-}
-
-static void flush_cache_for_context_switch(struct vcpu *next)
-{
- extern cpumask_t cpu_cache_coherent_map;
- int cpu = smp_processor_id();
-
- if (is_idle_vcpu(next) ||
- __test_and_clear_bit(cpu, &next->arch.cache_coherent_map)) {
- if (cpumask_test_and_clear_cpu(cpu, &cpu_cache_coherent_map)) {
- unsigned long flags;
- u64 progress = 0;
- s64 status;
-
- local_irq_save(flags);
- status = ia64_pal_cache_flush(4, 0, &progress, NULL);
- local_irq_restore(flags);
- if (status != 0)
- panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
- "cache_type=4 status %lx", status);
- }
- }
-}
-
-static void set_current_psr_i_addr(struct vcpu* v)
-{
- __ia64_per_cpu_var(current_psr_i_addr) =
- (uint8_t*)(v->domain->arch.shared_info_va +
- INT_ENABLE_OFFSET(v));
- __ia64_per_cpu_var(current_psr_ic_addr) = (int *)
- (v->domain->arch.shared_info_va + XSI_PSR_IC_OFS);
-}
-
-static void clear_current_psr_i_addr(void)
-{
- __ia64_per_cpu_var(current_psr_i_addr) = NULL;
- __ia64_per_cpu_var(current_psr_ic_addr) = NULL;
-}
-
-static void lazy_fp_switch(struct vcpu *prev, struct vcpu *next)
-{
- /*
- * Implement eager save, lazy restore
- */
- if (!is_idle_vcpu(prev)) {
- if (VMX_DOMAIN(prev)) {
- if (FP_PSR(prev) & IA64_PSR_MFH) {
- __ia64_save_fpu(prev->arch._thread.fph);
- __ia64_per_cpu_var(fp_owner) = prev;
- }
- } else {
- if (PSCB(prev, hpsr_mfh)) {
- __ia64_save_fpu(prev->arch._thread.fph);
- __ia64_per_cpu_var(fp_owner) = prev;
- }
- }
- }
-
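- /* The restore side merely sets psr.dfh, so the next fp access by the
- * vcpu faults and its fp state is then reloaded lazily. */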
- if (!is_idle_vcpu(next)) {
- if (VMX_DOMAIN(next)) {
- FP_PSR(next) = IA64_PSR_DFH;
- vcpu_regs(next)->cr_ipsr |= IA64_PSR_DFH;
- } else {
- PSCB(next, hpsr_dfh) = 1;
- PSCB(next, hpsr_mfh) = 0;
- vcpu_regs(next)->cr_ipsr |= IA64_PSR_DFH;
- }
- }
-}
-
-static void load_state(struct vcpu *v)
-{
- load_region_regs(v);
- ia64_set_pta(vcpu_pta(v));
- vcpu_load_kernel_regs(v);
- if (vcpu_pkr_in_use(v))
- vcpu_pkr_load_regs(v);
- set_current_psr_i_addr(v);
-}
-
-void schedule_tail(struct vcpu *prev)
-{
- extern char ia64_ivt;
-
- context_saved(prev);
-
- if (VMX_DOMAIN(current))
- vmx_do_resume(current);
- else {
- if (VMX_DOMAIN(prev))
- ia64_set_iva(&ia64_ivt);
- load_state(current);
- migrate_timer(&current->arch.hlt_timer, current->processor);
- }
- flush_vtlb_for_context_switch(prev, current);
-}
-
-void context_switch(struct vcpu *prev, struct vcpu *next)
-{
- uint64_t spsr;
-
- local_irq_save(spsr);
-
- if (VMX_DOMAIN(prev)) {
- vmx_save_state(prev);
- if (!VMX_DOMAIN(next)) {
- /* VMX domains can change the physical cr.dcr.
- * Restore default to prevent leakage. */
- uint64_t dcr = ia64_getreg(_IA64_REG_CR_DCR);
- /* xenoprof:
- * don't change psr.pp.
- * It is manipulated by xenoprof.
- */
- dcr = (IA64_DEFAULT_DCR_BITS & ~IA64_DCR_PP) | (dcr & IA64_DCR_PP);
- ia64_setreg(_IA64_REG_CR_DCR, dcr);
- }
- }
-
- lazy_fp_switch(prev, current);
-
- if (prev->arch.dbg_used || next->arch.dbg_used) {
- /*
- * Load debug registers either because they are valid or to clear
- * the previous one.
- */
- ia64_load_debug_regs(next->arch.dbr);
- }
-
- /*
- * Disable the VHPT walker.
- * ia64_switch_to() might cause a VHPT fault because it flushes
- * dtr[IA64_TR_VHPT] and reinserts the mapping with dtr[IA64_TR_STACK].
- * (VHPT_SIZE_LOG2 << 2) is just to avoid a
- * Reserved Register/Field fault.
- */
- ia64_set_pta(VHPT_SIZE_LOG2 << 2);
- prev = ia64_switch_to(next);
-
- /* Note: ia64_switch_to does not return here at vcpu initialization. */
-
- if (VMX_DOMAIN(current)) {
- vmx_load_state(current);
- } else {
- extern char ia64_ivt;
-
- if (VMX_DOMAIN(prev))
- ia64_set_iva(&ia64_ivt);
-
- if (!is_idle_vcpu(current)) {
- load_state(current);
- vcpu_set_next_timer(current);
- if (vcpu_timer_expired(current))
- vcpu_pend_timer(current);
- /* steal time accounting */
- if (!guest_handle_is_null(runstate_guest(current)))
- __copy_to_guest(runstate_guest(current), &current->runstate, 1);
- } else {
- /* When switching to the idle domain, we only need to disable the
- * vhpt walker. All accesses happening within the idle context will
- * then be handled by TR mapping and identity mapping.
- */
- clear_current_psr_i_addr();
- }
- }
- local_irq_restore(spsr);
-
- /* lazy fp */
- if (current->processor != current->arch.last_processor) {
- unsigned long *addr;
- addr = (unsigned long *)per_cpu_addr(fp_owner,
- current->arch.last_processor);
- ia64_cmpxchg(acq, addr, current, 0, 8);
- }
-
- flush_vtlb_for_context_switch(prev, current);
- flush_cache_for_context_switch(current);
- context_saved(prev);
-}
-
-void continue_running(struct vcpu *same)
-{
- /* nothing to do */
-}
-
-#ifdef CONFIG_PERFMON
-static int pal_halt = 1;
-static int can_do_pal_halt = 1;
-
-static int __init nohalt_setup(char * str)
-{
- pal_halt = can_do_pal_halt = 0;
- return 1;
-}
-__setup("nohalt", nohalt_setup);
-
-void
-update_pal_halt_status(int status)
-{
- can_do_pal_halt = pal_halt && status;
-}
-#else
-#define can_do_pal_halt (1)
-#endif
-
-static void default_idle(void)
-{
- local_irq_disable();
- if ( cpu_is_haltable(smp_processor_id()) ) {
- if (can_do_pal_halt)
- safe_halt();
- else
- cpu_relax();
- }
- local_irq_enable();
-}
-
-extern void play_dead(void);
-
-static void continue_cpu_idle_loop(void)
-{
- int cpu = smp_processor_id();
-
- for ( ; ; )
- {
-#ifdef IA64
-// __IRQ_STAT(cpu, idle_timestamp) = jiffies
-#else
- irq_stat[cpu].idle_timestamp = jiffies;
-#endif
- while ( cpu_is_haltable(cpu) )
- default_idle();
- raise_softirq(SCHEDULE_SOFTIRQ);
- do_tasklet();
- do_softirq();
- if (!cpu_online(cpu))
- play_dead();
- }
-}
-
-void startup_cpu_idle_loop(void)
-{
- /* Just some sanity to ensure that the scheduler is set up okay. */
- ASSERT(is_idle_vcpu(current));
- raise_softirq(SCHEDULE_SOFTIRQ);
-
- continue_cpu_idle_loop();
-}
-
-/* compile time check that get_order(sizeof(mapped_regs_t)) ==
- * get_order_from_shift(XMAPPEDREGS_SHIFT)
- */
-#if !(((1 << (XMAPPEDREGS_SHIFT - 1)) < MAPPED_REGS_T_SIZE) && \
- (MAPPED_REGS_T_SIZE < (1 << (XMAPPEDREGS_SHIFT + 1))))
-# error "XMAPPEDREGS_SHIFT doesn't match sizeof(mapped_regs_t)."
-#endif
-
-void hlt_timer_fn(void *data)
-{
- struct vcpu *v = data;
- vcpu_unblock(v);
-}
-
-void relinquish_vcpu_resources(struct vcpu *v)
-{
- if (HAS_PERVCPU_VHPT(v->domain))
- pervcpu_vhpt_free(v);
- if (v->arch.privregs != NULL) {
- free_xenheap_pages(v->arch.privregs,
- get_order_from_shift(XMAPPEDREGS_SHIFT));
- v->arch.privregs = NULL;
- }
- kill_timer(&v->arch.hlt_timer);
-}
-
-struct domain *alloc_domain_struct(void)
-{
- struct domain *d;
-#ifdef CONFIG_IA64_PICKLE_DOMAIN
- /*
- * We pack the MFN of the domain structure into a 32-bit field within
- * the page_info structure. Hence the MEMF_bits() restriction.
- */
- d = alloc_xenheap_pages(get_order_from_bytes(sizeof(*d)),
- MEMF_bits(32 + PAGE_SHIFT));
-#else
- d = xmalloc(struct domain);
-#endif
-
- if ( d != NULL )
- memset(d, 0, sizeof(*d));
- return d;
-}
-
-void free_domain_struct(struct domain *d)
-{
-#ifdef CONFIG_IA64_PICKLE_DOMAIN
- free_xenheap_pages(d, get_order_from_bytes(sizeof(*d)));
-#else
- xfree(d);
-#endif
-}
-
-struct vcpu *alloc_vcpu_struct(void)
-{
- struct page_info *page;
- struct vcpu *v;
- struct thread_info *ti;
- static int first_allocation = 1;
-
- if (first_allocation) {
- first_allocation = 0;
- /* Keep idle vcpu0 statically allocated at compile time, as some
- * code inherited from Linux still requires it in the early phase.
- */
- return idle_vcpu[0];
- }
-
- page = alloc_domheap_pages(NULL, KERNEL_STACK_SIZE_ORDER, 0);
- if (page == NULL)
- return NULL;
- v = page_to_virt(page);
- memset(v, 0, sizeof(*v));
-
- ti = alloc_thread_info(v);
- /* Clear thread_info to clear some important fields, like
- * preempt_count
- */
- memset(ti, 0, sizeof(struct thread_info));
- init_switch_stack(v);
-
- return v;
-}
-
-void free_vcpu_struct(struct vcpu *v)
-{
- free_domheap_pages(virt_to_page(v), KERNEL_STACK_SIZE_ORDER);
-}
-
-int vcpu_initialise(struct vcpu *v)
-{
- struct domain *d = v->domain;
-
- if (!is_idle_domain(d)) {
- v->arch.metaphysical_rid_dt = d->arch.metaphysical_rid_dt;
- v->arch.metaphysical_rid_d = d->arch.metaphysical_rid_d;
- /* Set default values to saved_rr. */
- v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rid_dt;
- v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rid_dt;
-
- /* Is this correct?
- It depends on the domain's rid usage.
-
- A domain may share rids among its processors (eg when it has a
- global VHPT). In this case, we should also share rids
- among vcpus and the rid ranges should be the same.
-
- However a domain may have per-cpu rid allocation. In
- this case we don't want to share rids among vcpus, but we may
- do it if two vcpus are on the same cpu... */
-
- v->arch.starting_rid = d->arch.starting_rid;
- v->arch.ending_rid = d->arch.ending_rid;
- v->arch.rid_bits = d->arch.rid_bits;
- v->arch.breakimm = d->arch.breakimm;
- v->arch.last_processor = INVALID_PROCESSOR;
- v->arch.vhpt_pg_shift = PAGE_SHIFT;
- }
-
- if (!VMX_DOMAIN(v))
- init_timer(&v->arch.hlt_timer, hlt_timer_fn, v,
- cpumask_any(&cpu_online_map));
-
- return 0;
-}
-
-static void vcpu_share_privregs_with_guest(struct vcpu *v)
-{
- struct domain *d = v->domain;
- int i, order = get_order_from_shift(XMAPPEDREGS_SHIFT);
-
- for (i = 0; i < (1 << order); i++)
- share_xen_page_with_guest(virt_to_page(v->arch.privregs) + i,
- d, XENSHARE_writable);
- /*
- * XXX IA64_XMAPPEDREGS_PADDR
- * assign these pages into guest pseudo physical address
- * space for dom0 to map this page by gmfn.
- * this is necessary for domain save, restore and dump-core.
- */
- for (i = 0; i < XMAPPEDREGS_SIZE; i += PAGE_SIZE)
- assign_domain_page(d, IA64_XMAPPEDREGS_PADDR(v->vcpu_id) + i,
- virt_to_maddr(v->arch.privregs + i));
-}
-
-int vcpu_late_initialise(struct vcpu *v)
-{
- int rc, order;
-
- if (HAS_PERVCPU_VHPT(v->domain)) {
- rc = pervcpu_vhpt_alloc(v);
- if (rc != 0)
- return rc;
- }
-
- /* Create privregs page. */
- order = get_order_from_shift(XMAPPEDREGS_SHIFT);
- v->arch.privregs = alloc_xenheap_pages(order, 0);
- if (v->arch.privregs == NULL)
- return -ENOMEM;
- BUG_ON(v->arch.privregs == NULL);
- memset(v->arch.privregs, 0, 1 << XMAPPEDREGS_SHIFT);
- vcpu_share_privregs_with_guest(v);
-
- return 0;
-}
-
-void vcpu_destroy(struct vcpu *v)
-{
- if (is_hvm_vcpu(v))
- vmx_relinquish_vcpu_resources(v);
- else
- relinquish_vcpu_resources(v);
-}
-
-static unsigned long*
-vcpu_to_rbs_bottom(struct vcpu *v)
-{
- return (unsigned long*)((char *)v + IA64_RBS_OFFSET);
-}
-
-static void init_switch_stack(struct vcpu *v)
-{
- struct pt_regs *regs = vcpu_regs (v);
- struct switch_stack *sw = (struct switch_stack *) regs - 1;
- extern char ia64_ret_from_clone;
-
- memset(sw, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs));
- sw->ar_bspstore = (unsigned long)vcpu_to_rbs_bottom(v);
- sw->b0 = (unsigned long) &ia64_ret_from_clone;
- sw->ar_fpsr = FPSR_DEFAULT;
- v->arch._thread.ksp = (unsigned long) sw - 16;
- // stay on the kernel stack because we may get interrupts!
- // ia64_ret_from_clone switches to the user stack
- v->arch._thread.on_ustack = 0;
- memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
-}
-
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
-static int opt_pervcpu_vhpt = 1;
-integer_param("pervcpu_vhpt", opt_pervcpu_vhpt);
-#endif
-
-int arch_domain_create(struct domain *d, unsigned int domcr_flags)
-{
- int i;
-
- // the following will eventually need to be negotiated dynamically
- d->arch.shared_info_va = DEFAULT_SHAREDINFO_ADDR;
- d->arch.breakimm = __IA64_XEN_HYPERCALL_DEFAULT;
- for (i = 0; i < NR_CPUS; i++) {
- d->arch.last_vcpu[i].vcpu_id = INVALID_VCPU_ID;
- }
-
- if (is_idle_domain(d))
- return 0;
-
- INIT_LIST_HEAD(&d->arch.pdev_list);
- foreign_p2m_init(d);
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
- d->arch.has_pervcpu_vhpt = opt_pervcpu_vhpt;
- dprintk(XENLOG_INFO, "%s:%d domain %d pervcpu_vhpt %d\n",
- __func__, __LINE__, d->domain_id, d->arch.has_pervcpu_vhpt);
-#endif
- if (tlb_track_create(d) < 0)
- goto fail_nomem1;
- d->shared_info = alloc_xenheap_pages(
- get_order_from_shift(XSI_SHIFT), 0);
- if (d->shared_info == NULL)
- goto fail_nomem;
- BUG_ON(d->shared_info == NULL);
- memset(d->shared_info, 0, XSI_SIZE);
- for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
- share_xen_page_with_guest(virt_to_page((char *)d->shared_info + i),
- d, XENSHARE_writable);
-
- /* We may also need an emulation rid for region4, though a guest is
- * unlikely to issue uncacheable accesses in metaphysical mode. But
- * keeping such info here may be saner.
- */
- if (!allocate_rid_range(d,0))
- goto fail_nomem;
-
- memset(&d->arch.mm, 0, sizeof(d->arch.mm));
- d->arch.relres = RELRES_not_started;
- d->arch.mm_teardown_offset = 0;
- INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
-
- if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
- goto fail_nomem;
-
- if (iommu_domain_init(d) != 0)
- goto fail_iommu;
-
- /*
- * grant_table_create() can't fully initialize grant table for domain
- * because it is called before arch_domain_create().
- * Here we complete the initialization which requires p2m table.
- */
- spin_lock(&d->grant_table->lock);
- for (i = 0; i < nr_grant_frames(d->grant_table); i++)
- ia64_gnttab_create_shared_page(d, d->grant_table, i);
- spin_unlock(&d->grant_table->lock);
-
- d->arch.ioport_caps = rangeset_new(d, "I/O Ports",
- RANGESETF_prettyprint_hex);
-
- dprintk(XENLOG_DEBUG, "arch_domain_create: domain=%p\n", d);
- return 0;
-
-fail_iommu:
- iommu_domain_destroy(d);
-fail_nomem:
- tlb_track_destroy(d);
-fail_nomem1:
- if (d->arch.mm.pgd != NULL)
- pgd_free(d->arch.mm.pgd);
- if (d->shared_info != NULL)
- free_xenheap_pages(d->shared_info,
- get_order_from_shift(XSI_SHIFT));
- return -ENOMEM;
-}
-
-void arch_domain_destroy(struct domain *d)
-{
- mm_final_teardown(d);
-
- if (d->shared_info != NULL)
- free_xenheap_pages(d->shared_info,
- get_order_from_shift(XSI_SHIFT));
-
- if ( iommu_enabled && need_iommu(d) )
- iommu_domain_destroy(d);
-
- tlb_track_destroy(d);
-
- /* Clear vTLB for the next domain. */
- domain_flush_tlb_vhpt(d);
-
- deallocate_rid_range(d);
-}
-
-void arch_vcpu_reset(struct vcpu *v)
-{
- /* FIXME: Stub for now */
-}
-
-/* Here it is assumed that all of the CPUs have the same RSE.N_STACKED_PHYS */
-static unsigned long num_phys_stacked;
-static int __init
-init_num_phys_stacked(void)
-{
- switch (ia64_pal_rse_info(&num_phys_stacked, NULL)) {
- case 0L:
- printk("the number of physical stacked general registers"
- "(RSE.N_STACKED_PHYS) = %ld\n", num_phys_stacked);
- return 0;
- case -2L:
- case -3L:
- default:
- break;
- }
- printk("WARNING: PAL_RSE_INFO call failed. "
- "domain save/restore may NOT work!\n");
- return -EINVAL;
-}
-__initcall(init_num_phys_stacked);
-
-#define COPY_FPREG(dst, src) memcpy(dst, src, sizeof(struct ia64_fpreg))
-
-#define AR_PFS_PEC_SHIFT 51
-#define AR_PFS_REC_SIZE 6
-#define AR_PFS_PEC_MASK (((1UL << AR_PFS_REC_SIZE) - 1) << AR_PFS_PEC_SHIFT)
-
-/*
- * See init_switch_stack() and ptrace.h
- */
-static struct switch_stack*
-vcpu_to_switch_stack(struct vcpu* v)
-{
- return (struct switch_stack *)(v->arch._thread.ksp + 16);
-}
-
-static int
-vcpu_has_not_run(struct vcpu* v)
-{
- extern char ia64_ret_from_clone;
- struct switch_stack *sw = vcpu_to_switch_stack(v);
-
- return (sw == (struct switch_stack *)(vcpu_regs(v)) - 1) &&
- (sw->b0 == (unsigned long)&ia64_ret_from_clone);
-}
-
-static void
-nats_update(unsigned int* nats, unsigned int reg, char nat)
-{
- BUG_ON(reg > 31);
-
- if (nat)
- *nats |= (1UL << reg);
- else
- *nats &= ~(1UL << reg);
-}
-
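-/*
- * The guest ITC is virtualized as host ITC + itc_offset; itc_last
- * (set by __vcpu_set_itc() below and, presumably, by other timer paths)
- * is used to clamp reads so the virtual ITC never appears to go backwards.
- */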
-static unsigned long
-__vcpu_get_itc(struct vcpu *v)
-{
- unsigned long itc_last;
- unsigned long itc_offset;
- unsigned long itc;
-
- if (unlikely(v->arch.privregs == NULL))
- return ia64_get_itc();
-
- itc_last = v->arch.privregs->itc_last;
- itc_offset = v->arch.privregs->itc_offset;
- itc = ia64_get_itc();
- itc += itc_offset;
- if (itc_last >= itc)
- itc = itc_last;
- return itc;
-}
-
-static void
-__vcpu_set_itc(struct vcpu *v, u64 val)
-{
- unsigned long itc;
- unsigned long itc_offset;
- unsigned long itc_last;
-
- BUG_ON(v->arch.privregs == NULL);
-
- if (v != current)
- vcpu_pause(v);
-
- itc = ia64_get_itc();
- itc_offset = val - itc;
- itc_last = val;
-
- v->arch.privregs->itc_offset = itc_offset;
- v->arch.privregs->itc_last = itc_last;
-
- if (v != current)
- vcpu_unpause(v);
-}
-
-void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
-{
- int i;
- struct vcpu_tr_regs *tr = &c.nat->regs.tr;
- struct cpu_user_regs *uregs = vcpu_regs(v);
- struct switch_stack *sw = vcpu_to_switch_stack(v);
- struct unw_frame_info info;
- int is_hvm = VMX_DOMAIN(v);
- unsigned int rbs_size;
- unsigned long *const rbs_bottom = vcpu_to_rbs_bottom(v);
- unsigned long *rbs_top;
- unsigned long *rbs_rnat_addr;
- unsigned int top_slot;
- unsigned int num_regs;
-
- memset(c.nat, 0, sizeof(*c.nat));
- c.nat->regs.b[6] = uregs->b6;
- c.nat->regs.b[7] = uregs->b7;
-
- memset(&info, 0, sizeof(info));
- unw_init_from_blocked_task(&info, v);
- if (vcpu_has_not_run(v)) {
- c.nat->regs.ar.lc = sw->ar_lc;
- c.nat->regs.ar.ec =
- (sw->ar_pfs & AR_PFS_PEC_MASK) >> AR_PFS_PEC_SHIFT;
- } else if (unw_unwind_to_user(&info) < 0) {
- /* warn: should panic? */
- gdprintk(XENLOG_ERR, "vcpu=%d unw_unwind_to_user() failed.\n",
- v->vcpu_id);
- show_stack(v, NULL);
-
- /* can't return error */
- c.nat->regs.ar.lc = 0;
- c.nat->regs.ar.ec = 0;
- } else {
- unw_get_ar(&info, UNW_AR_LC, &c.nat->regs.ar.lc);
- unw_get_ar(&info, UNW_AR_EC, &c.nat->regs.ar.ec);
- }
-
- if (!is_hvm)
- c.nat->regs.ar.itc = __vcpu_get_itc(v);
-
- c.nat->regs.ar.csd = uregs->ar_csd;
- c.nat->regs.ar.ssd = uregs->ar_ssd;
-
- c.nat->regs.r[8] = uregs->r8;
- c.nat->regs.r[9] = uregs->r9;
- c.nat->regs.r[10] = uregs->r10;
- c.nat->regs.r[11] = uregs->r11;
-
- if (is_hvm)
- c.nat->regs.psr = vmx_vcpu_get_psr(v);
- else
- c.nat->regs.psr = vcpu_get_psr(v);
-
- c.nat->regs.ip = uregs->cr_iip;
- c.nat->regs.cfm = uregs->cr_ifs;
-
- c.nat->regs.ar.unat = uregs->ar_unat;
- c.nat->regs.ar.pfs = uregs->ar_pfs;
- c.nat->regs.ar.rsc = uregs->ar_rsc;
- c.nat->regs.ar.rnat = uregs->ar_rnat;
- c.nat->regs.ar.bspstore = uregs->ar_bspstore;
-
- c.nat->regs.pr = uregs->pr;
- c.nat->regs.b[0] = uregs->b0;
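- /* uregs->loadrs apparently holds the loadrs value pre-shifted as for
- * ar.rsc, so >> 16 yields the dirty RBS size in bytes;
- * ia64_rse_num_regs() converts that byte span into a register count,
- * accounting for the RNaT collection slot at every 64th word. */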
- rbs_size = uregs->loadrs >> 16;
- num_regs = ia64_rse_num_regs(rbs_bottom,
- (unsigned long*)((char*)rbs_bottom + rbs_size));
- c.nat->regs.ar.bsp = (unsigned long)ia64_rse_skip_regs(
- (unsigned long*)c.nat->regs.ar.bspstore, num_regs);
- BUG_ON(num_regs > num_phys_stacked);
-
- c.nat->regs.r[1] = uregs->r1;
- c.nat->regs.r[12] = uregs->r12;
- c.nat->regs.r[13] = uregs->r13;
- c.nat->regs.ar.fpsr = uregs->ar_fpsr;
- c.nat->regs.r[15] = uregs->r15;
-
- c.nat->regs.r[14] = uregs->r14;
- c.nat->regs.r[2] = uregs->r2;
- c.nat->regs.r[3] = uregs->r3;
- c.nat->regs.r[16] = uregs->r16;
- c.nat->regs.r[17] = uregs->r17;
- c.nat->regs.r[18] = uregs->r18;
- c.nat->regs.r[19] = uregs->r19;
- c.nat->regs.r[20] = uregs->r20;
- c.nat->regs.r[21] = uregs->r21;
- c.nat->regs.r[22] = uregs->r22;
- c.nat->regs.r[23] = uregs->r23;
- c.nat->regs.r[24] = uregs->r24;
- c.nat->regs.r[25] = uregs->r25;
- c.nat->regs.r[26] = uregs->r26;
- c.nat->regs.r[27] = uregs->r27;
- c.nat->regs.r[28] = uregs->r28;
- c.nat->regs.r[29] = uregs->r29;
- c.nat->regs.r[30] = uregs->r30;
- c.nat->regs.r[31] = uregs->r31;
-
- c.nat->regs.ar.ccv = uregs->ar_ccv;
-
- COPY_FPREG(&c.nat->regs.f[2], &sw->f2);
- COPY_FPREG(&c.nat->regs.f[3], &sw->f3);
- COPY_FPREG(&c.nat->regs.f[4], &sw->f4);
- COPY_FPREG(&c.nat->regs.f[5], &sw->f5);
-
- COPY_FPREG(&c.nat->regs.f[6], &uregs->f6);
- COPY_FPREG(&c.nat->regs.f[7], &uregs->f7);
- COPY_FPREG(&c.nat->regs.f[8], &uregs->f8);
- COPY_FPREG(&c.nat->regs.f[9], &uregs->f9);
- COPY_FPREG(&c.nat->regs.f[10], &uregs->f10);
- COPY_FPREG(&c.nat->regs.f[11], &uregs->f11);
-
- COPY_FPREG(&c.nat->regs.f[12], &sw->f12);
- COPY_FPREG(&c.nat->regs.f[13], &sw->f13);
- COPY_FPREG(&c.nat->regs.f[14], &sw->f14);
- COPY_FPREG(&c.nat->regs.f[15], &sw->f15);
- COPY_FPREG(&c.nat->regs.f[16], &sw->f16);
- COPY_FPREG(&c.nat->regs.f[17], &sw->f17);
- COPY_FPREG(&c.nat->regs.f[18], &sw->f18);
- COPY_FPREG(&c.nat->regs.f[19], &sw->f19);
- COPY_FPREG(&c.nat->regs.f[20], &sw->f20);
- COPY_FPREG(&c.nat->regs.f[21], &sw->f21);
- COPY_FPREG(&c.nat->regs.f[22], &sw->f22);
- COPY_FPREG(&c.nat->regs.f[23], &sw->f23);
- COPY_FPREG(&c.nat->regs.f[24], &sw->f24);
- COPY_FPREG(&c.nat->regs.f[25], &sw->f25);
- COPY_FPREG(&c.nat->regs.f[26], &sw->f26);
- COPY_FPREG(&c.nat->regs.f[27], &sw->f27);
- COPY_FPREG(&c.nat->regs.f[28], &sw->f28);
- COPY_FPREG(&c.nat->regs.f[29], &sw->f29);
- COPY_FPREG(&c.nat->regs.f[30], &sw->f30);
- COPY_FPREG(&c.nat->regs.f[31], &sw->f31);
-
- // f32 - f127
- memcpy(&c.nat->regs.f[32], &v->arch._thread.fph[0],
- sizeof(v->arch._thread.fph));
-
-#define NATS_UPDATE(reg) \
- nats_update(&c.nat->regs.nats, (reg), \
- !!(uregs->eml_unat & \
- (1UL << ia64_unat_pos(&uregs->r ## reg))))
-
- // corresponding bit in ar.unat is determined by
- // (&uregs->rN){8:3}.
- // r8: the lowest gr member of struct cpu_user_regs.
- // r7: the highest gr member of struct cpu_user_regs.
- BUILD_BUG_ON(offsetof(struct cpu_user_regs, r7) -
- offsetof(struct cpu_user_regs, r8) >
- 64 * sizeof(unsigned long));
-
- NATS_UPDATE(1);
- NATS_UPDATE(2);
- NATS_UPDATE(3);
-
- NATS_UPDATE(8);
- NATS_UPDATE(9);
- NATS_UPDATE(10);
- NATS_UPDATE(11);
- NATS_UPDATE(12);
- NATS_UPDATE(13);
- NATS_UPDATE(14);
- NATS_UPDATE(15);
- NATS_UPDATE(16);
- NATS_UPDATE(17);
- NATS_UPDATE(18);
- NATS_UPDATE(19);
- NATS_UPDATE(20);
- NATS_UPDATE(21);
- NATS_UPDATE(22);
- NATS_UPDATE(23);
- NATS_UPDATE(24);
- NATS_UPDATE(25);
- NATS_UPDATE(26);
- NATS_UPDATE(27);
- NATS_UPDATE(28);
- NATS_UPDATE(29);
- NATS_UPDATE(30);
- NATS_UPDATE(31);
-
- if (!is_hvm) {
- c.nat->regs.r[4] = uregs->r4;
- c.nat->regs.r[5] = uregs->r5;
- c.nat->regs.r[6] = uregs->r6;
- c.nat->regs.r[7] = uregs->r7;
-
- NATS_UPDATE(4);
- NATS_UPDATE(5);
- NATS_UPDATE(6);
- NATS_UPDATE(7);
-#undef NATS_UPDATE
- } else {
- /*
- * For a VTi domain, r[4-7] are sometimes saved both in
- * uregs->r[4-7] and on the memory stack, and sometimes only on the
- * memory stack. So it is ok to get them from the memory stack.
- */
- if (vcpu_has_not_run(v)) {
- c.nat->regs.r[4] = sw->r4;
- c.nat->regs.r[5] = sw->r5;
- c.nat->regs.r[6] = sw->r6;
- c.nat->regs.r[7] = sw->r7;
-
- nats_update(&c.nat->regs.nats, 4,
- !!(sw->ar_unat &
- (1UL << ia64_unat_pos(&sw->r4))));
- nats_update(&c.nat->regs.nats, 5,
- !!(sw->ar_unat &
- (1UL << ia64_unat_pos(&sw->r5))));
- nats_update(&c.nat->regs.nats, 6,
- !!(sw->ar_unat &
- (1UL << ia64_unat_pos(&sw->r6))));
- nats_update(&c.nat->regs.nats, 7,
- !!(sw->ar_unat &
- (1UL << ia64_unat_pos(&sw->r7))));
- } else {
- char nat;
-
- unw_get_gr(&info, 4, &c.nat->regs.r[4], &nat);
- nats_update(&c.nat->regs.nats, 4, nat);
- unw_get_gr(&info, 5, &c.nat->regs.r[5], &nat);
- nats_update(&c.nat->regs.nats, 5, nat);
- unw_get_gr(&info, 6, &c.nat->regs.r[6], &nat);
- nats_update(&c.nat->regs.nats, 6, nat);
- unw_get_gr(&info, 7, &c.nat->regs.r[7], &nat);
- nats_update(&c.nat->regs.nats, 7, nat);
- }
- }
-
- c.nat->regs.rbs_voff = (IA64_RBS_OFFSET / 8) % 64;
- if (unlikely(rbs_size > sizeof(c.nat->regs.rbs)))
- gdprintk(XENLOG_INFO,
- "rbs_size is too large 0x%x > 0x%lx\n",
- rbs_size, sizeof(c.nat->regs.rbs));
- else
- memcpy(c.nat->regs.rbs, rbs_bottom, rbs_size);
-
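- /* Recover the NaT collection bits for the copied frame: if the RNaT
- * slot for rbs_top has not been spilled yet (it would lie at or above
- * ar.bspstore), the bits still live in sw->ar_rnat. */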
- rbs_top = (unsigned long*)((char *)rbs_bottom + rbs_size) - 1;
- rbs_rnat_addr = ia64_rse_rnat_addr(rbs_top);
- if ((unsigned long)rbs_rnat_addr >= sw->ar_bspstore)
- rbs_rnat_addr = &sw->ar_rnat;
-
- top_slot = ia64_rse_slot_num(rbs_top);
-
- c.nat->regs.rbs_rnat = (*rbs_rnat_addr) & ((1UL << top_slot) - 1);
- if (ia64_rse_rnat_addr(rbs_bottom) == ia64_rse_rnat_addr(rbs_top)) {
- unsigned int bottom_slot = ia64_rse_slot_num(rbs_bottom);
- c.nat->regs.rbs_rnat &= ~((1UL << bottom_slot) - 1);
- }
-
- c.nat->regs.num_phys_stacked = num_phys_stacked;
-
- if (VMX_DOMAIN(v))
- c.nat->privregs_pfn = VGC_PRIVREGS_HVM;
- else
- c.nat->privregs_pfn = get_gpfn_from_mfn(
- virt_to_maddr(v->arch.privregs) >> PAGE_SHIFT);
-
- for (i = 0; i < IA64_NUM_DBG_REGS; i++) {
- if (VMX_DOMAIN(v)) {
- vmx_vcpu_get_dbr(v, i, &c.nat->regs.dbr[i]);
- vmx_vcpu_get_ibr(v, i, &c.nat->regs.ibr[i]);
- } else {
- vcpu_get_dbr(v, i, &c.nat->regs.dbr[i]);
- vcpu_get_ibr(v, i, &c.nat->regs.ibr[i]);
- }
- }
-
- for (i = 0; i < 8; i++)
- vcpu_get_rr(v, (unsigned long)i << 61, &c.nat->regs.rr[i]);
-
- /* Fill extra regs. */
- for (i = 0;
- (i < sizeof(tr->itrs) / sizeof(tr->itrs[0])) && i < NITRS;
- i++) {
- tr->itrs[i].pte = v->arch.itrs[i].pte.val;
- tr->itrs[i].itir = v->arch.itrs[i].itir;
- tr->itrs[i].vadr = v->arch.itrs[i].vadr;
- tr->itrs[i].rid = v->arch.itrs[i].rid;
- }
- for (i = 0;
- (i < sizeof(tr->dtrs) / sizeof(tr->dtrs[0])) && i < NDTRS;
- i++) {
- tr->dtrs[i].pte = v->arch.dtrs[i].pte.val;
- tr->dtrs[i].itir = v->arch.dtrs[i].itir;
- tr->dtrs[i].vadr = v->arch.dtrs[i].vadr;
- tr->dtrs[i].rid = v->arch.dtrs[i].rid;
- }
- c.nat->event_callback_ip = v->arch.event_callback_ip;
-
- /* If PV and privregs is not set, we can't read mapped registers. */
- if (!is_hvm_vcpu(v) && v->arch.privregs == NULL)
- return;
-
- vcpu_get_dcr(v, &c.nat->regs.cr.dcr);
-
- c.nat->regs.cr.itm = is_hvm_vcpu(v) ?
- vmx_vcpu_get_itm(v) : PSCBX(v, domain_itm);
- vcpu_get_iva(v, &c.nat->regs.cr.iva);
- vcpu_get_pta(v, &c.nat->regs.cr.pta);
-
- vcpu_get_ipsr(v, &c.nat->regs.cr.ipsr);
- vcpu_get_isr(v, &c.nat->regs.cr.isr);
- vcpu_get_iip(v, &c.nat->regs.cr.iip);
- vcpu_get_ifa(v, &c.nat->regs.cr.ifa);
- vcpu_get_itir(v, &c.nat->regs.cr.itir);
- vcpu_get_iha(v, &c.nat->regs.cr.iha);
-
- //XXX change irr[] and arch.insvc[]
- if (is_hvm_vcpu(v))
- /* c.nat->regs.cr.ivr = vmx_vcpu_get_ivr(v) */; // XXX not SMP-safe
- else
- vcpu_get_ivr (v, &c.nat->regs.cr.ivr);
- vcpu_get_iim(v, &c.nat->regs.cr.iim);
-
- vcpu_get_tpr(v, &c.nat->regs.cr.tpr);
- vcpu_get_irr0(v, &c.nat->regs.cr.irr[0]);
- vcpu_get_irr1(v, &c.nat->regs.cr.irr[1]);
- vcpu_get_irr2(v, &c.nat->regs.cr.irr[2]);
- vcpu_get_irr3(v, &c.nat->regs.cr.irr[3]);
- vcpu_get_itv(v, &c.nat->regs.cr.itv);//XXX vlsapic
- vcpu_get_pmv(v, &c.nat->regs.cr.pmv);
- vcpu_get_cmcv(v, &c.nat->regs.cr.cmcv);
-
- if (is_hvm)
- vmx_arch_get_info_guest(v, c);
-}
-
-#if 0
-// for debug
-static void
-__rbs_print(const char* func, int line, const char* name,
- const unsigned long* rbs, unsigned int rbs_size)
-{
- unsigned int i;
- printk("%s:%d %s rbs %p\n", func, line, name, rbs);
- printk(" rbs_size 0x%016x no 0x%lx\n",
- rbs_size, rbs_size / sizeof(unsigned long));
-
- for (i = 0; i < rbs_size / sizeof(unsigned long); i++) {
- const char* zero_or_n = "0x";
- if (ia64_rse_is_rnat_slot((unsigned long*)&rbs[i]))
- zero_or_n = "Nx";
-
- if ((i % 3) == 0)
- printk("0x%02x:", i);
- printk(" %s%016lx", zero_or_n, rbs[i]);
- if ((i % 3) == 2)
- printk("\n");
- }
- printk("\n");
-}
-
-#define rbs_print(rbs, rbs_size) \
- __rbs_print(__func__, __LINE__, (#rbs), (rbs), (rbs_size))
-#endif
-
-static int
-copy_rbs(struct vcpu* v, unsigned long* dst_rbs_size,
- const unsigned long* rbs, unsigned long rbs_size,
- unsigned long src_rnat, unsigned long rbs_voff)
-{
- int rc = -EINVAL;
- struct page_info* page;
- unsigned char* vaddr;
- unsigned long* src_bsp;
- unsigned long* src_bspstore;
-
- struct switch_stack* sw = vcpu_to_switch_stack(v);
- unsigned long num_regs;
- unsigned long* dst_bsp;
- unsigned long* dst_bspstore;
- unsigned long* dst_rnat;
- unsigned long dst_rnat_tmp;
- unsigned long dst_rnat_mask;
- unsigned long flags;
- extern void ia64_copy_rbs(unsigned long* dst_bspstore,
- unsigned long* dst_rbs_size,
- unsigned long* dst_rnat_p,
- unsigned long* src_bsp,
- unsigned long src_rbs_size,
- unsigned long src_rnat);
-
- dst_bspstore = vcpu_to_rbs_bottom(v);
- *dst_rbs_size = rbs_size;
- if (rbs_size == 0)
- return 0;
-
- // the rbs offset depends on sizeof(struct vcpu), so it's too
- // unstable for the hypercall ABI.
- // we need to take the rbs offset into account.
- //memcpy(dst_bspstore, c.nat->regs.rbs, rbs_size);
-
- // It is assumed that rbs_size is small enough compared
- // to KERNEL_STACK_SIZE.
- page = alloc_domheap_pages(NULL, KERNEL_STACK_SIZE_ORDER, 0);
- if (page == NULL)
- return -ENOMEM;
- vaddr = page_to_virt(page);
-
- src_bspstore = (unsigned long*)(vaddr + rbs_voff * 8);
- src_bsp = (unsigned long*)((unsigned char*)src_bspstore + rbs_size);
- if ((unsigned long)src_bsp >= (unsigned long)vaddr + PAGE_SIZE)
- goto out;
- memcpy(src_bspstore, rbs, rbs_size);
-
- num_regs = ia64_rse_num_regs(src_bspstore, src_bsp);
- dst_bsp = ia64_rse_skip_regs(dst_bspstore, num_regs);
- *dst_rbs_size = (unsigned long)dst_bsp - (unsigned long)dst_bspstore;
-
- // rough check.
- if (((unsigned long)dst_bsp & ~PAGE_MASK) > KERNEL_STACK_SIZE / 2)
- goto out;
-
- // ia64_copy_rbs() uses the real cpu's stack registers.
- // So it may fault with an Illegal Operation fault, resulting
- // in a panic, if rbs_size is too large to load compared to
- // the number of physical stacked registers, RSE.N_STACKED_PHYS,
- // which is cpu implementation specific.
- // See SDM vol. 2 Register Stack Engine 6, especially 6.5.5.
- //
- // For safe operation and cpu model independence,
- // we would need to copy them by hand without loadrs and flushrs.
- // However even if we implemented that, a similar issue still occurs
- // when running the guest: the CPU context restore routine issues
- // loadrs, resulting in an Illegal Operation fault. And what if the
- // vRSE is in enforced lazy mode? We can't store any dirty stacked
- // registers into the RBS without cover or br.call.
- if (num_regs > num_phys_stacked) {
- rc = -ENOSYS;
- gdprintk(XENLOG_WARNING,
- "%s:%d domain %d: can't load stacked registers\n"
- "requested size 0x%lx => 0x%lx, num regs %ld "
- "RSE.N_STACKED_PHYS %ld\n",
- __func__, __LINE__, v->domain->domain_id,
- rbs_size, *dst_rbs_size, num_regs,
- num_phys_stacked);
- goto out;
- }
-
- // we mask interrupts to avoid using register backing store.
- local_irq_save(flags);
- ia64_copy_rbs(dst_bspstore, dst_rbs_size, &dst_rnat_tmp,
- src_bsp, rbs_size, src_rnat);
- local_irq_restore(flags);
-
- dst_rnat_mask = (1UL << ia64_rse_slot_num(dst_bsp)) - 1;
- dst_rnat = ia64_rse_rnat_addr(dst_bsp);
- if ((unsigned long)dst_rnat > sw->ar_bspstore)
- dst_rnat = &sw->ar_rnat;
- // if ia64_rse_rnat_addr(dst_bsp) ==
- // ia64_rse_rnat_addr(vcpu_to_rbs_bottom(v)), the lsb of rnat
- // is just ignored, so we don't have to mask it out.
- *dst_rnat =
- (*dst_rnat & ~dst_rnat_mask) | (dst_rnat_tmp & dst_rnat_mask);
-
- rc = 0;
-out:
- free_domheap_pages(page, KERNEL_STACK_SIZE_ORDER);
- return rc;
-}
-
-static void
-unat_update(unsigned long *unat_eml, unsigned long *spill_addr, char nat)
-{
- unsigned int pos = ia64_unat_pos(spill_addr);
- if (nat)
- *unat_eml |= (1UL << pos);
- else
- *unat_eml &= ~(1UL << pos);
-}
-
-int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
-{
- struct cpu_user_regs *uregs = vcpu_regs(v);
- struct domain *d = v->domain;
- struct switch_stack *sw = vcpu_to_switch_stack(v);
- int was_initialised = v->is_initialised;
- struct unw_frame_info info;
- unsigned int rbs_size;
- unsigned int num_regs;
- unsigned long * const rbs_bottom = vcpu_to_rbs_bottom(v);
- int rc = 0;
- int i;
-
- /* Finish vcpu initialization. */
- if (!was_initialised) {
- if (is_hvm_domain(d))
- rc = vmx_final_setup_guest(v);
- else
- rc = vcpu_late_initialise(v);
- if (rc != 0)
- return rc;
-
- vcpu_init_regs(v);
-
- v->is_initialised = 1;
- /* Auto-online VCPU0 when it is initialised. */
- if (v->vcpu_id == 0 || (c.nat != NULL &&
- c.nat->flags & VGCF_online))
- clear_bit(_VPF_down, &v->pause_flags);
- }
-
- if (c.nat == NULL)
- return 0;
-
- uregs->b6 = c.nat->regs.b[6];
- uregs->b7 = c.nat->regs.b[7];
-
- memset(&info, 0, sizeof(info));
- unw_init_from_blocked_task(&info, v);
- if (vcpu_has_not_run(v)) {
- sw->ar_lc = c.nat->regs.ar.lc;
- sw->ar_pfs =
- (sw->ar_pfs & ~AR_PFS_PEC_MASK) |
- ((c.nat->regs.ar.ec << AR_PFS_PEC_SHIFT) &
- AR_PFS_PEC_MASK);
- } else if (unw_unwind_to_user(&info) < 0) {
- /* warn: should panic? */
- gdprintk(XENLOG_ERR,
- "vcpu=%d unw_unwind_to_user() failed.\n",
- v->vcpu_id);
- show_stack(v, NULL);
-
- //return -ENOSYS;
- } else {
- unw_set_ar(&info, UNW_AR_LC, c.nat->regs.ar.lc);
- unw_set_ar(&info, UNW_AR_EC, c.nat->regs.ar.ec);
- }
-
- if (!is_hvm_domain(d) && (c.nat->flags & VGCF_SET_AR_ITC))
- __vcpu_set_itc(v, c.nat->regs.ar.itc);
-
- uregs->ar_csd = c.nat->regs.ar.csd;
- uregs->ar_ssd = c.nat->regs.ar.ssd;
-
- uregs->r8 = c.nat->regs.r[8];
- uregs->r9 = c.nat->regs.r[9];
- uregs->r10 = c.nat->regs.r[10];
- uregs->r11 = c.nat->regs.r[11];
-
- if (!is_hvm_domain(d))
- vcpu_set_psr(v, c.nat->regs.psr);
- else
- vmx_vcpu_set_psr(v, c.nat->regs.psr);
- uregs->cr_iip = c.nat->regs.ip;
- uregs->cr_ifs = c.nat->regs.cfm;
-
- uregs->ar_unat = c.nat->regs.ar.unat;
- uregs->ar_pfs = c.nat->regs.ar.pfs;
- uregs->ar_rsc = c.nat->regs.ar.rsc;
- uregs->ar_rnat = c.nat->regs.ar.rnat;
- uregs->ar_bspstore = c.nat->regs.ar.bspstore;
-
- uregs->pr = c.nat->regs.pr;
- uregs->b0 = c.nat->regs.b[0];
- num_regs = ia64_rse_num_regs((unsigned long*)c.nat->regs.ar.bspstore,
- (unsigned long*)c.nat->regs.ar.bsp);
- rbs_size = (unsigned long)ia64_rse_skip_regs(rbs_bottom, num_regs) -
- (unsigned long)rbs_bottom;
- if (rbs_size > sizeof (c.nat->regs.rbs)) {
- gdprintk(XENLOG_INFO,
- "rbs size is too large %x > %lx\n",
- rbs_size, sizeof (c.nat->regs.rbs));
- return -EINVAL;
- }
- if (rbs_size > 0 &&
- ((IA64_RBS_OFFSET / 8) % 64) != c.nat->regs.rbs_voff)
- gdprintk(XENLOG_INFO,
- "rbs stack offset is different! xen 0x%x given 0x%x",
- (IA64_RBS_OFFSET / 8) % 64, c.nat->regs.rbs_voff);
-
- /* Protection against crazy user code. */
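- /* (for a fresh vcpu, loadrs is forced below to match rbs_size so the
- * copy always runs; otherwise a mismatched loadrs from the guest
- * context simply skips the RBS copy) */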
- if (!was_initialised)
- uregs->loadrs = (rbs_size << 16);
- if (rbs_size == (uregs->loadrs >> 16)) {
- unsigned long dst_rbs_size = 0;
- if (vcpu_has_not_run(v))
- sw->ar_bspstore = (unsigned long)rbs_bottom;
-
- rc = copy_rbs(v, &dst_rbs_size,
- c.nat->regs.rbs, rbs_size,
- c.nat->regs.rbs_rnat,
- c.nat->regs.rbs_voff);
- if (rc < 0)
- return rc;
-
- /* In case of newly created vcpu, ar_bspstore points to
- * the bottom of register stack. Move it up.
- * See also init_switch_stack().
- */
- if (vcpu_has_not_run(v)) {
- uregs->loadrs = (dst_rbs_size << 16);
- sw->ar_bspstore = (unsigned long)((char*)rbs_bottom +
- dst_rbs_size);
- }
- }
-
- // Inhibit save/restore between cpus of different RSE.N_STACKED_PHYS
- // to avoid nasty issues.
- //
- // The number of physical stacked general registers
- // (RSE.N_STACKED_PHYS) isn't virtualized. A guest OS utilizes it via
- // the PAL_RSE_INFO call and the value might be exported to user
- // processes (Linux does so via /proc/cpuinfo).
- // The SDM says only that the number is cpu implementation specific.
- //
- // If the number on the restoring cpu differs from that of the saving
- // cpu, the following, or something worse, might happen.
- // - The Xen VMM itself may panic with an illegal operation fault
- //   when issuing loadrs to run the guest.
- //   When RSE.N_STACKED_PHYS of the saving CPU > RSE.N_STACKED_PHYS
- //   of the restoring CPU.
- //   This case is detected and the restore refused by copy_rbs().
- // - The guest kernel may panic with an illegal operation fault.
- //   When RSE.N_STACKED_PHYS of the saving CPU > RSE.N_STACKED_PHYS
- //   of the restoring CPU.
- // - Information leak from the guest kernel to a user process.
- //   When RSE.N_STACKED_PHYS of the saving CPU < RSE.N_STACKED_PHYS
- //   of the restoring CPU.
- //   Before returning to a user process, the kernel should zero-clear
- //   all physical stacked registers to prevent kernel bits leaking.
- //   It would do so based on RSE.N_STACKED_PHYS (Linux does).
- //   In the restored environment the kernel would clear only a part
- //   of the physical stacked registers.
- // - User processes or human operators could be confused.
- //   RSE.N_STACKED_PHYS might be exported to user processes or human
- //   operators; on linux it is actually exported via /proc/cpuinfo,
- //   and user processes might use it.
- //   I don't know any concrete example, but it's possible in theory:
- //   e.g. a thread library might size the RBS area based on the value.
- //   (Fortunately glibc nptl doesn't.)
- if (c.nat->regs.num_phys_stacked != 0 && /* COMPAT */
- c.nat->regs.num_phys_stacked != num_phys_stacked) {
- gdprintk(XENLOG_WARNING,
- "num phys stacked is different! "
- "xen 0x%lx given 0x%lx",
- num_phys_stacked, c.nat->regs.num_phys_stacked);
- return -EINVAL;
- }
-
- uregs->r1 = c.nat->regs.r[1];
- uregs->r12 = c.nat->regs.r[12];
- uregs->r13 = c.nat->regs.r[13];
- uregs->ar_fpsr = c.nat->regs.ar.fpsr;
- uregs->r15 = c.nat->regs.r[15];
-
- uregs->r14 = c.nat->regs.r[14];
- uregs->r2 = c.nat->regs.r[2];
- uregs->r3 = c.nat->regs.r[3];
- uregs->r16 = c.nat->regs.r[16];
- uregs->r17 = c.nat->regs.r[17];
- uregs->r18 = c.nat->regs.r[18];
- uregs->r19 = c.nat->regs.r[19];
- uregs->r20 = c.nat->regs.r[20];
- uregs->r21 = c.nat->regs.r[21];
- uregs->r22 = c.nat->regs.r[22];
- uregs->r23 = c.nat->regs.r[23];
- uregs->r24 = c.nat->regs.r[24];
- uregs->r25 = c.nat->regs.r[25];
- uregs->r26 = c.nat->regs.r[26];
- uregs->r27 = c.nat->regs.r[27];
- uregs->r28 = c.nat->regs.r[28];
- uregs->r29 = c.nat->regs.r[29];
- uregs->r30 = c.nat->regs.r[30];
- uregs->r31 = c.nat->regs.r[31];
-
- uregs->ar_ccv = c.nat->regs.ar.ccv;
-
- COPY_FPREG(&sw->f2, &c.nat->regs.f[2]);
- COPY_FPREG(&sw->f3, &c.nat->regs.f[3]);
- COPY_FPREG(&sw->f4, &c.nat->regs.f[4]);
- COPY_FPREG(&sw->f5, &c.nat->regs.f[5]);
-
- COPY_FPREG(&uregs->f6, &c.nat->regs.f[6]);
- COPY_FPREG(&uregs->f7, &c.nat->regs.f[7]);
- COPY_FPREG(&uregs->f8, &c.nat->regs.f[8]);
- COPY_FPREG(&uregs->f9, &c.nat->regs.f[9]);
- COPY_FPREG(&uregs->f10, &c.nat->regs.f[10]);
- COPY_FPREG(&uregs->f11, &c.nat->regs.f[11]);
-
- COPY_FPREG(&sw->f12, &c.nat->regs.f[12]);
- COPY_FPREG(&sw->f13, &c.nat->regs.f[13]);
- COPY_FPREG(&sw->f14, &c.nat->regs.f[14]);
- COPY_FPREG(&sw->f15, &c.nat->regs.f[15]);
- COPY_FPREG(&sw->f16, &c.nat->regs.f[16]);
- COPY_FPREG(&sw->f17, &c.nat->regs.f[17]);
- COPY_FPREG(&sw->f18, &c.nat->regs.f[18]);
- COPY_FPREG(&sw->f19, &c.nat->regs.f[19]);
- COPY_FPREG(&sw->f20, &c.nat->regs.f[20]);
- COPY_FPREG(&sw->f21, &c.nat->regs.f[21]);
- COPY_FPREG(&sw->f22, &c.nat->regs.f[22]);
- COPY_FPREG(&sw->f23, &c.nat->regs.f[23]);
- COPY_FPREG(&sw->f24, &c.nat->regs.f[24]);
- COPY_FPREG(&sw->f25, &c.nat->regs.f[25]);
- COPY_FPREG(&sw->f26, &c.nat->regs.f[26]);
- COPY_FPREG(&sw->f27, &c.nat->regs.f[27]);
- COPY_FPREG(&sw->f28, &c.nat->regs.f[28]);
- COPY_FPREG(&sw->f29, &c.nat->regs.f[29]);
- COPY_FPREG(&sw->f30, &c.nat->regs.f[30]);
- COPY_FPREG(&sw->f31, &c.nat->regs.f[31]);
-
- // f32 - f127
- memcpy(&v->arch._thread.fph[0], &c.nat->regs.f[32],
- sizeof(v->arch._thread.fph));
-
-#define UNAT_UPDATE(reg) \
- unat_update(&uregs->eml_unat, &uregs->r ## reg, \
- !!(c.nat->regs.nats & (1UL << (reg))));
-
- uregs->eml_unat = 0;
- UNAT_UPDATE(1);
- UNAT_UPDATE(2);
- UNAT_UPDATE(3);
-
- UNAT_UPDATE(8);
- UNAT_UPDATE(9);
- UNAT_UPDATE(10);
- UNAT_UPDATE(11);
- UNAT_UPDATE(12);
- UNAT_UPDATE(13);
- UNAT_UPDATE(14);
- UNAT_UPDATE(15);
- UNAT_UPDATE(16);
- UNAT_UPDATE(17);
- UNAT_UPDATE(18);
- UNAT_UPDATE(19);
- UNAT_UPDATE(20);
- UNAT_UPDATE(21);
- UNAT_UPDATE(22);
- UNAT_UPDATE(23);
- UNAT_UPDATE(24);
- UNAT_UPDATE(25);
- UNAT_UPDATE(26);
- UNAT_UPDATE(27);
- UNAT_UPDATE(28);
- UNAT_UPDATE(29);
- UNAT_UPDATE(30);
- UNAT_UPDATE(31);
-
- /*
- * r4-r7 are sometimes saved both in pt_regs->r[4-7] and on the memory
- * stack, and sometimes only on the memory stack.
- * In both cases, both the memory stack and pt_regs->r[4-7] are updated.
- */
- uregs->r4 = c.nat->regs.r[4];
- uregs->r5 = c.nat->regs.r[5];
- uregs->r6 = c.nat->regs.r[6];
- uregs->r7 = c.nat->regs.r[7];
-
- UNAT_UPDATE(4);
- UNAT_UPDATE(5);
- UNAT_UPDATE(6);
- UNAT_UPDATE(7);
-#undef UNAT_UPDATE
- if (vcpu_has_not_run(v)) {
- sw->r4 = c.nat->regs.r[4];
- sw->r5 = c.nat->regs.r[5];
- sw->r6 = c.nat->regs.r[6];
- sw->r7 = c.nat->regs.r[7];
-
- unat_update(&sw->ar_unat, &sw->r4,
- !!(c.nat->regs.nats & (1UL << 4)));
- unat_update(&sw->ar_unat, &sw->r5,
- !!(c.nat->regs.nats & (1UL << 5)));
- unat_update(&sw->ar_unat, &sw->r6,
- !!(c.nat->regs.nats & (1UL << 6)));
- unat_update(&sw->ar_unat, &sw->r7,
- !!(c.nat->regs.nats & (1UL << 7)));
- } else {
- unw_set_gr(&info, 4, c.nat->regs.r[4],
- !!(c.nat->regs.nats & (1UL << 4)));
- unw_set_gr(&info, 5, c.nat->regs.r[5],
- !!(c.nat->regs.nats & (1UL << 5)));
- unw_set_gr(&info, 6, c.nat->regs.r[6],
- !!(c.nat->regs.nats & (1UL << 6)));
- unw_set_gr(&info, 7, c.nat->regs.r[7],
- !!(c.nat->regs.nats & (1UL << 7)));
- }
-
- if (!is_hvm_domain(d)) {
- /* domain runs at PL2/3 */
- uregs->cr_ipsr = vcpu_pl_adjust(uregs->cr_ipsr,
- IA64_PSR_CPL0_BIT);
- uregs->ar_rsc = vcpu_pl_adjust(uregs->ar_rsc, 2);
- }
-
- for (i = 0; i < IA64_NUM_DBG_REGS; i++) {
- if (is_hvm_domain(d)) {
- vmx_vcpu_set_dbr(v, i, c.nat->regs.dbr[i]);
- vmx_vcpu_set_ibr(v, i, c.nat->regs.ibr[i]);
- } else {
- vcpu_set_dbr(v, i, c.nat->regs.dbr[i]);
- vcpu_set_ibr(v, i, c.nat->regs.ibr[i]);
- }
- }
-
- /* rr[] must be set before setting itrs[] and dtrs[] */
- for (i = 0; i < 8; i++) {
- unsigned long rrval = c.nat->regs.rr[i];
- unsigned long reg = (unsigned long)i << 61;
- IA64FAULT fault = IA64_NO_FAULT;
-
- if (rrval == 0)
- continue;
- if (is_hvm_domain(d)) {
- // Without the VGCF_EXTRA_REGS check,
- // a VTi domain doesn't boot.
- if (c.nat->flags & VGCF_EXTRA_REGS)
- fault = vmx_vcpu_set_rr(v, reg, rrval);
- } else
- fault = vcpu_set_rr(v, reg, rrval);
- if (fault != IA64_NO_FAULT)
- return -EINVAL;
- }
-
- if (c.nat->flags & VGCF_EXTRA_REGS) {
- struct vcpu_tr_regs *tr = &c.nat->regs.tr;
-
- for (i = 0;
- (i < sizeof(tr->itrs) / sizeof(tr->itrs[0])) && i < NITRS;
- i++) {
- if (is_hvm_domain(d))
- vmx_vcpu_itr_i(v, i, tr->itrs[i].pte,
- tr->itrs[i].itir,
- tr->itrs[i].vadr);
- else
- vcpu_set_itr(v, i, tr->itrs[i].pte,
- tr->itrs[i].itir,
- tr->itrs[i].vadr,
- tr->itrs[i].rid);
- }
- for (i = 0;
- (i < sizeof(tr->dtrs) / sizeof(tr->dtrs[0])) && i < NDTRS;
- i++) {
- if (is_hvm_domain(d))
- vmx_vcpu_itr_d(v, i, tr->dtrs[i].pte,
- tr->dtrs[i].itir,
- tr->dtrs[i].vadr);
- else
- vcpu_set_dtr(v, i,
- tr->dtrs[i].pte,
- tr->dtrs[i].itir,
- tr->dtrs[i].vadr,
- tr->dtrs[i].rid);
- }
- v->arch.event_callback_ip = c.nat->event_callback_ip;
- vcpu_set_iva(v, c.nat->regs.cr.iva);
- }
-
- if (is_hvm_domain(d))
- rc = vmx_arch_set_info_guest(v, c);
-
- return rc;
-}
-
-static int relinquish_memory(struct domain *d, struct page_list_head *list)
-{
- struct page_info *page;
-#ifndef __ia64__
- unsigned long x, y;
-#endif
- int ret = 0;
-
- /* Use a recursive lock, as we may enter 'free_domheap_page'. */
- spin_lock_recursive(&d->page_alloc_lock);
-
- while ( (page = page_list_remove_head(list)) )
- {
- /* Grab a reference to the page so it won't disappear from under us. */
- if ( unlikely(!get_page(page, d)) )
- {
- /* Couldn't get a reference -- someone is freeing this page. */
- page_list_add_tail(page, &d->arch.relmem_list);
- continue;
- }
-
- if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
- put_page_and_type(page);
-
- if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
-
-#ifndef __ia64__
- /*
- * Forcibly invalidate base page tables at this point to break circular
- * 'linear page table' references. This is okay because MMU structures
- * are not shared across domains and this domain is now dead. Thus base
- * tables are not in use so a non-zero count means circular reference.
- */
- y = page->u.inuse.type_info;
- for ( ; ; )
- {
- x = y;
- if ( likely((x & (PGT_type_mask|PGT_validated)) !=
- (PGT_base_page_table|PGT_validated)) )
- break;
-
- y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
- if ( likely(y == x) )
- {
- free_page_type(page, PGT_base_page_table);
- break;
- }
- }
-#endif
-
- /* Follow the list chain and /then/ potentially free the page. */
- BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY);
- page_list_add_tail(page, &d->arch.relmem_list);
- put_page(page);
-
- if (hypercall_preempt_check()) {
- ret = -EAGAIN;
- goto out;
- }
- }
-
- page_list_move(list, &d->arch.relmem_list);
-
- out:
- spin_unlock_recursive(&d->page_alloc_lock);
- return ret;
-}
-
-int domain_relinquish_resources(struct domain *d)
-{
- int ret = 0;
-
- switch (d->arch.relres) {
- case RELRES_not_started:
- pci_release_devices(d);
-
- /* Relinquish guest resources for VT-i domain. */
- if (is_hvm_domain(d))
- vmx_relinquish_guest_resources(d);
- d->arch.relres = RELRES_mm_teardown;
- /*fallthrough*/
-
- case RELRES_mm_teardown:
- if (d->arch.pirq_eoi_map != NULL) {
- put_page(virt_to_page(d->arch.pirq_eoi_map));
- d->arch.pirq_eoi_map = NULL;
- d->arch.auto_unmask = 0;
- }
-
- /* Tear down shadow mode stuff. */
- ret = mm_teardown(d);
- if (ret != 0)
- return ret;
- d->arch.relres = RELRES_xen;
- /* fallthrough */
-
- case RELRES_xen:
- /* Relinquish every xen page of memory. */
- ret = relinquish_memory(d, &d->xenpage_list);
- if (ret != 0)
- return ret;
- d->arch.relres = RELRES_dom;
- /* fallthrough */
-
- case RELRES_dom:
- /* Relinquish every domain page of memory. */
- ret = relinquish_memory(d, &d->page_list);
- if (ret != 0)
- return ret;
- d->arch.relres = RELRES_done;
- /* fallthrough */
-
- case RELRES_done:
- break;
-
- default:
- BUG();
- }
-
- if (is_hvm_domain(d) && d->arch.sal_data)
- xfree(d->arch.sal_data);
-
- return 0;
-}
-
-unsigned long
-domain_set_shared_info_va (unsigned long va)
-{
- struct vcpu *v = current;
- struct domain *d = v->domain;
- int rc;
-
- /* Check virtual address:
- must belong to region 7,
- must be 64Kb aligned,
- must not be within Xen virtual space. */
- if ((va >> 61) != 7
- || (va & 0xffffUL) != 0
- || (va >= HYPERVISOR_VIRT_START && va < HYPERVISOR_VIRT_END))
- panic_domain (NULL, "%s: bad va (0x%016lx)\n", __func__, va);
-
- /* Note: this doesn't work well if other cpus are already running.
- However this is part of the spec :-) */
- gdprintk(XENLOG_DEBUG, "Domain set shared_info_va to 0x%016lx\n", va);
- d->arch.shared_info_va = va;
-
- VCPU(v, interrupt_mask_addr) = (unsigned char *)va +
- INT_ENABLE_OFFSET(v);
- set_current_psr_i_addr(v);
-
- /* Remap the shared pages. */
- BUG_ON(VMX_DOMAIN(v));
- rc = !set_one_rr(7UL << 61, PSCB(v,rrs[7]));
- BUG_ON(rc);
-
- return rc;
-}
-
-/* Transfer and clear the shadow bitmap in 1kB chunks for L1 cache. */
-#define SHADOW_COPY_CHUNK 1024
-
-int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc)
-{
- unsigned int op = sc->op;
- int rc = 0;
- int i;
- //struct vcpu *v;
-
- if (unlikely(d == current->domain)) {
- gdprintk(XENLOG_INFO,
- "Don't try to do a shadow op on yourself!\n");
- return -EINVAL;
- }
-
- domain_pause(d);
-
- switch (op)
- {
- case XEN_DOMCTL_SHADOW_OP_OFF:
- if (shadow_mode_enabled (d)) {
- u64 *bm = d->arch.shadow_bitmap;
- struct vcpu *v;
-
- for_each_vcpu(d, v)
- v->arch.shadow_bitmap = NULL;
-
- /* Flush vhpt and tlb to restore dirty bit usage. */
- flush_tlb_for_log_dirty(d);
-
- /* Free bitmap. */
- d->arch.shadow_bitmap_size = 0;
- d->arch.shadow_bitmap = NULL;
- xfree(bm);
- }
- break;
-
- case XEN_DOMCTL_SHADOW_OP_ENABLE_TEST:
- case XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE:
- rc = -EINVAL;
- break;
-
- case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
- if (shadow_mode_enabled(d)) {
- rc = -EINVAL;
- break;
- }
-
- atomic64_set(&d->arch.shadow_fault_count, 0);
- atomic64_set(&d->arch.shadow_dirty_count, 0);
-
- d->arch.shadow_bitmap_size =
- (domain_get_maximum_gpfn(d) + BITS_PER_LONG) &
- ~(BITS_PER_LONG - 1);
- d->arch.shadow_bitmap = xmalloc_array(unsigned long,
- d->arch.shadow_bitmap_size / BITS_PER_LONG);
- if (d->arch.shadow_bitmap == NULL) {
- d->arch.shadow_bitmap_size = 0;
- rc = -ENOMEM;
- }
- else {
- struct vcpu *v;
- memset(d->arch.shadow_bitmap, 0,
- d->arch.shadow_bitmap_size / 8);
-
- for_each_vcpu(d, v)
- v->arch.shadow_bitmap = d->arch.shadow_bitmap;
- /* Flush vhpt and tlb to enable dirty bit
- virtualization. */
- flush_tlb_for_log_dirty(d);
- }
- break;
-
- case XEN_DOMCTL_SHADOW_OP_CLEAN:
- {
- int nbr_bytes;
-
- sc->stats.fault_count = atomic64_read(&d->arch.shadow_fault_count);
- sc->stats.dirty_count = atomic64_read(&d->arch.shadow_dirty_count);
-
- atomic64_set(&d->arch.shadow_fault_count, 0);
- atomic64_set(&d->arch.shadow_dirty_count, 0);
-
- if (guest_handle_is_null(sc->dirty_bitmap) ||
- (d->arch.shadow_bitmap == NULL)) {
- rc = -EINVAL;
- break;
- }
-
- if (sc->pages > d->arch.shadow_bitmap_size)
- sc->pages = d->arch.shadow_bitmap_size;
-
- nbr_bytes = (sc->pages + 7) / 8;
-
- for (i = 0; i < nbr_bytes; i += SHADOW_COPY_CHUNK) {
- int size = (nbr_bytes - i) > SHADOW_COPY_CHUNK ?
- SHADOW_COPY_CHUNK : nbr_bytes - i;
-
- if (copy_to_guest_offset(
- sc->dirty_bitmap, i,
- (uint8_t *)d->arch.shadow_bitmap + i,
- size)) {
- rc = -EFAULT;
- break;
- }
-
- memset((uint8_t *)d->arch.shadow_bitmap + i, 0, size);
- }
- flush_tlb_for_log_dirty(d);
-
- break;
- }
-
- case XEN_DOMCTL_SHADOW_OP_PEEK:
- {
- unsigned long size;
-
- sc->stats.fault_count = atomic64_read(&d->arch.shadow_fault_count);
- sc->stats.dirty_count = atomic64_read(&d->arch.shadow_dirty_count);
-
- if (guest_handle_is_null(sc->dirty_bitmap) ||
- (d->arch.shadow_bitmap == NULL)) {
- rc = -EINVAL;
- break;
- }
-
- if (sc->pages > d->arch.shadow_bitmap_size)
- sc->pages = d->arch.shadow_bitmap_size;
-
- size = (sc->pages + 7) / 8;
- if (copy_to_guest(sc->dirty_bitmap,
- (uint8_t *)d->arch.shadow_bitmap, size)) {
- rc = -EFAULT;
- break;
- }
- break;
- }
- case XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
- sc->mb = 0;
- break;
- case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
- if (sc->mb > 0) {
- BUG();
- rc = -ENOMEM;
- }
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- domain_unpause(d);
-
- return rc;
-}
-
-// remove following line if not privifying in memory
-//#define HAVE_PRIVIFY_MEMORY
-#ifndef HAVE_PRIVIFY_MEMORY
-#define privify_memory(x,y) do {} while(0)
-#endif
-
-static void __init loaddomainelfimage(struct domain *d, struct elf_binary *elf,
- unsigned long phys_load_offset)
-{
- const elf_phdr *phdr;
- int phnum, h, filesz, memsz;
- unsigned long elfaddr, dom_mpaddr, dom_imva;
- struct page_info *p;
-
- phnum = elf_uval(elf, elf->ehdr, e_phnum);
- for (h = 0; h < phnum; h++) {
- phdr = elf_phdr_by_index(elf, h);
- if (!elf_phdr_is_loadable(elf, phdr))
- continue;
-
- filesz = elf_uval(elf, phdr, p_filesz);
- memsz = elf_uval(elf, phdr, p_memsz);
- elfaddr = (unsigned long) elf->image + elf_uval(elf, phdr, p_offset);
- dom_mpaddr = elf_uval(elf, phdr, p_paddr);
- dom_mpaddr += phys_load_offset;
-
- while (memsz > 0) {
- p = assign_new_domain_page(d,dom_mpaddr);
- BUG_ON (unlikely(p == NULL));
- dom_imva = __va_ul(page_to_maddr(p));
- if (filesz > 0) {
- if (filesz >= PAGE_SIZE)
- copy_page((void *) dom_imva,
- (void *) elfaddr);
- else {
- // copy partial page
- memcpy((void *) dom_imva,
- (void *) elfaddr, filesz);
- // zero the rest of page
- memset((void *) dom_imva+filesz, 0,
- PAGE_SIZE-filesz);
- }
-//FIXME: This test for code seems to find a lot more than objdump -x does
- if (elf_uval(elf, phdr, p_flags) & PF_X) {
- privify_memory(dom_imva,PAGE_SIZE);
- flush_icache_range(dom_imva,
- dom_imva+PAGE_SIZE);
- }
- }
- else if (memsz > 0) {
- /* always zero out entire page */
- clear_page((void *) dom_imva);
- }
- memsz -= PAGE_SIZE;
- filesz -= PAGE_SIZE;
- elfaddr += PAGE_SIZE;
- dom_mpaddr += PAGE_SIZE;
- }
- }
-}
-
-static void __init calc_dom0_size(void)
-{
- unsigned long domheap_pages;
- unsigned long p2m_pages;
- unsigned long spare_hv_pages;
- unsigned long max_dom0_size;
- unsigned long iommu_pg_table_pages = 0;
-
-	/* Estimate the maximum memory we can safely allocate for dom0
-	 * by subtracting the p2m table allocation and a chunk of memory
-	 * for DMA and PCI mapping from the available domheap pages. The
-	 * chunk for DMA, PCI, etc., is a guesstimate, as xen doesn't know
-	 * those requirements ahead of time; it is calculated as
-	 * 128MB + 1MB per 4GB of system memory. */
- domheap_pages = avail_domheap_pages();
- p2m_pages = domheap_pages / PTRS_PER_PTE;
- spare_hv_pages = 8192 + (domheap_pages / 4096);
-
- if (iommu_enabled)
- iommu_pg_table_pages = domheap_pages * 4 / 512;
- /* There are 512 ptes in one 4K vtd page. */
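-	/* Illustrative numbers, assuming the usual 16KB Xen/ia64 page size
-	 * (so PTRS_PER_PTE == 2048) and a 4GB domheap (262144 pages):
-	 * p2m_pages = 262144 / 2048 = 128 pages (2MB), spare_hv_pages =
-	 * 8192 + 64 = 8256 pages (~129MB, i.e. the "128MB + 1MB per 4GB"
-	 * above), and, with the IOMMU enabled, iommu_pg_table_pages =
-	 * 262144 * 4 / 512 = 2048 pages (32MB). */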
-
- max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages) -
- iommu_pg_table_pages) * PAGE_SIZE;
- printk("Maximum permitted dom0 size: %luMB\n",
- max_dom0_size / (1024*1024));
-
- /* validate proposed dom0_size, fix up as needed */
- if (dom0_size > max_dom0_size) {
- printk("Reducing dom0 memory allocation from %luK to %luK "
- "to fit available memory\n",
- dom0_size / 1024, max_dom0_size / 1024);
- dom0_size = max_dom0_size;
- }
-
- /* dom0_mem=0 can be passed in to give all available mem to dom0 */
- if (dom0_size == 0) {
- printk("Allocating all available memory to dom0\n");
- dom0_size = max_dom0_size;
- }
-
- /* Check dom0 size. */
- if (dom0_size < 4 * 1024 * 1024) {
- panic("dom0_mem is too small, boot aborted"
- " (try e.g. dom0_mem=256M or dom0_mem=65536K)\n");
- }
-
- if (running_on_sim) {
- dom0_size = 128*1024*1024; //FIXME: Should be configurable
- }
-
- /* no need to allocate pages for now
- * pages are allocated by map_new_domain_page() via loaddomainelfimage()
- */
-}
-
-
-/*
- * Domain 0 has direct access to all devices absolutely. However
- * the major point of this stub here, is to allow alloc_dom_mem
- * handled with order > 0 request. Dom0 requires that bit set to
- * allocate memory for other domains.
- */
-static void __init physdev_init_dom0(struct domain *d)
-{
- if (iomem_permit_access(d, 0UL, ~0UL))
- BUG();
- if (irqs_permit_access(d, 0, NR_IRQS-1))
- BUG();
- if (ioports_permit_access(d, 0, 0, 0xffff))
- BUG();
-}
-
-int __init construct_dom0(struct domain *d,
- unsigned long image_start, unsigned long image_len,
- unsigned long initrd_start, unsigned long initrd_len,
- char *cmdline)
-{
- int i, rc;
- start_info_t *si;
- dom0_vga_console_info_t *ci;
- struct vcpu *v = d->vcpu[0];
- unsigned long max_pages;
-
- struct elf_binary elf;
- struct elf_dom_parms parms;
- unsigned long p_start;
- unsigned long pkern_start;
- unsigned long pkern_entry;
- unsigned long pkern_end;
- unsigned long pinitrd_start = 0;
- unsigned long pstart_info;
- unsigned long phys_load_offset;
- struct page_info *start_info_page;
- unsigned long bp_mpa;
- struct ia64_boot_param *bp;
-
-//printk("construct_dom0: starting\n");
-
- /* Sanity! */
- BUG_ON(d != dom0);
- BUG_ON(d->vcpu == NULL);
- BUG_ON(d->vcpu[0] == NULL);
- BUG_ON(v->is_initialised);
-
- printk("*** LOADING DOMAIN 0 ***\n");
-
- calc_dom0_size();
-
- max_pages = dom0_size / PAGE_SIZE;
- d->max_pages = max_pages;
- d->tot_pages = 0;
-
- rc = elf_init(&elf, (void*)image_start, image_len);
- if ( rc != 0 )
- return rc;
-#ifdef VERBOSE
- elf_set_verbose(&elf);
-#endif
- elf_parse_binary(&elf);
-	if ((rc = elf_xen_parse(&elf, &parms)) != 0)
-		return rc;
-
- /*
- * We cannot rely on the load address in the ELF headers to
- * determine the meta physical address at which the image
- * is loaded. Patch the address to match the real one, based
- * on xen_pstart
- */
- phys_load_offset = xen_pstart - elf.pstart;
- elf.pstart += phys_load_offset;
- elf.pend += phys_load_offset;
- parms.virt_kstart += phys_load_offset;
- parms.virt_kend += phys_load_offset;
- parms.virt_entry += phys_load_offset;
-
- printk(" Dom0 kernel: %s, %s, paddr 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
- elf_64bit(&elf) ? "64-bit" : "32-bit",
- elf_msb(&elf) ? "msb" : "lsb",
- elf.pstart, elf.pend);
- if (!elf_64bit(&elf) ||
- elf_uval(&elf, elf.ehdr, e_machine) != EM_IA_64) {
- printk("Incompatible kernel binary\n");
- return -1;
- }
-
- if (parms.elf_notes[XEN_ELFNOTE_SUPPORTED_FEATURES].type != XEN_ENT_NONE &&
- !test_bit(XENFEAT_dom0, parms.f_supported))
- {
- printk("Kernel does not support Dom0 operation\n");
- return -1;
- }
-
- p_start = parms.virt_base;
- pkern_start = parms.virt_kstart;
- pkern_end = parms.virt_kend;
- pkern_entry = parms.virt_entry;
-
-//printk("p_start=%lx, pkern_start=%lx, pkern_end=%lx, pkern_entry=%lx\n",p_start,pkern_start,pkern_end,pkern_entry);
-
- if ( (p_start & (PAGE_SIZE-1)) != 0 )
- {
- printk("Initial guest OS must load to a page boundary.\n");
- return -EINVAL;
- }
-
- pstart_info = PAGE_ALIGN(pkern_end);
- if(initrd_start && initrd_len){
- unsigned long offset;
-
- /* The next page aligned boundary after the start info.
- Note: EFI_PAGE_SHIFT = 12 <= PAGE_SHIFT */
- pinitrd_start = pstart_info + PAGE_SIZE;
-
- if ((pinitrd_start + initrd_len - phys_load_offset) >= dom0_size)
- panic("%s: not enough memory assigned to dom0", __func__);
-
- for (offset = 0; offset < initrd_len; offset += PAGE_SIZE) {
- struct page_info *p;
- p = assign_new_domain_page(d, pinitrd_start + offset);
- if (p == NULL)
- panic("%s: can't allocate page for initrd image", __func__);
- if (initrd_len < offset + PAGE_SIZE)
- memcpy(page_to_virt(p), (void*)(initrd_start + offset),
- initrd_len - offset);
- else
- copy_page(page_to_virt(p), (void*)(initrd_start + offset));
- }
- }
-
- printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
- " Kernel image: %lx->%lx\n"
- " Entry address: %lx\n"
- " Init. ramdisk: %lx len %lx\n"
- " Start info.: %lx->%lx\n",
- pkern_start, pkern_end, pkern_entry, pinitrd_start, initrd_len,
- pstart_info, pstart_info + PAGE_SIZE);
-
- if ( (pkern_end - pkern_start) > (max_pages * PAGE_SIZE) )
- {
- printk("Initial guest OS requires too much space\n"
- "(%luMB is greater than %luMB limit)\n",
- (pkern_end-pkern_start)>>20,
- (max_pages <<PAGE_SHIFT)>>20);
- return -ENOMEM;
- }
-
- // if high 3 bits of pkern start are non-zero, error
-
- // if pkern end is after end of metaphysical memory, error
- // (we should be able to deal with this... later)
-
- /* Mask all upcalls... */
- for ( i = 1; i < XEN_LEGACY_MAX_VCPUS; i++ )
- d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
-
- printk ("Dom0 max_vcpus=%d\n", dom0_max_vcpus);
- for ( i = 1; i < dom0_max_vcpus; i++ )
- if (alloc_vcpu(d, i, i) == NULL)
- panic("Cannot allocate dom0 vcpu %d\n", i);
-
- /* Copy the OS image. */
- loaddomainelfimage(d, &elf, phys_load_offset);
-
- BUILD_BUG_ON(sizeof(start_info_t) + sizeof(dom0_vga_console_info_t) +
- sizeof(struct ia64_boot_param) > PAGE_SIZE);
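-
-	/*
-	 * Layout of the start-info page, as implied by the offsets used
-	 * below (a sketch; all three structures share the one page):
-	 *
-	 *	start_info_t
-	 *	struct ia64_boot_param	(at bp_mpa, handed to dom0 in r28)
-	 *	dom0_vga_console_info_t	(if fill_console_start_info() succeeds)
-	 *
-	 * The BUILD_BUG_ON above guarantees they fit in a single page.
-	 */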
-
- /* Set up start info area. */
- d->shared_info->arch.start_info_pfn = pstart_info >> PAGE_SHIFT;
- start_info_page = assign_new_domain_page(d, pstart_info);
- if (start_info_page == NULL)
- panic("can't allocate start info page");
- si = page_to_virt(start_info_page);
- clear_page(si);
- snprintf(si->magic, sizeof(si->magic), "xen-3.0-ia64");
- si->nr_pages = max_pages;
- si->flags = SIF_INITDOMAIN|SIF_PRIVILEGED;
- si->flags |= (xen_processor_pmbits << 8) & SIF_PM_MASK;
-
- printk("Dom0: 0x%lx\n", (u64)dom0);
-
- v->is_initialised = 1;
- clear_bit(_VPF_down, &v->pause_flags);
-
- /* Build firmware.
-	   Note: the Linux kernel reserves the memory used by start_info,
-	   so there is no need to remove it from the MDT. */
- bp_mpa = pstart_info + sizeof(struct start_info);
- rc = dom_fw_setup(d, bp_mpa, max_pages * PAGE_SIZE);
- if (rc != 0)
- return rc;
-
- /* Fill boot param. */
- strlcpy((char *)si->cmd_line, dom0_command_line, sizeof(si->cmd_line));
-
- bp = (struct ia64_boot_param *)((unsigned char *)si +
- sizeof(start_info_t));
- bp->command_line = pstart_info + offsetof (start_info_t, cmd_line);
-
- /* We assume console has reached the last line! */
- bp->console_info.num_cols = ia64_boot_param->console_info.num_cols;
- bp->console_info.num_rows = ia64_boot_param->console_info.num_rows;
- bp->console_info.orig_x = 0;
- bp->console_info.orig_y = bp->console_info.num_rows == 0 ?
- 0 : bp->console_info.num_rows - 1;
-
- bp->initrd_start = pinitrd_start;
- bp->initrd_size = ia64_boot_param->initrd_size;
-
- ci = (dom0_vga_console_info_t *)((unsigned char *)si +
- sizeof(start_info_t) +
- sizeof(struct ia64_boot_param));
-
- if (fill_console_start_info(ci)) {
- si->console.dom0.info_off = sizeof(start_info_t) +
- sizeof(struct ia64_boot_param);
- si->console.dom0.info_size = sizeof(dom0_vga_console_info_t);
- }
-
- vcpu_init_regs (v);
-
- vcpu_regs(v)->r28 = bp_mpa;
-
- vcpu_regs (v)->cr_iip = pkern_entry;
-
- physdev_init_dom0(d);
-
- iommu_dom0_init(d);
-
- return 0;
-}
-
-struct vcpu *__init alloc_dom0_vcpu0(void)
-{
- if (dom0_max_vcpus == 0)
- dom0_max_vcpus = MAX_VIRT_CPUS;
- if (dom0_max_vcpus > num_online_cpus())
- dom0_max_vcpus = num_online_cpus();
- if (dom0_max_vcpus > MAX_VIRT_CPUS)
- dom0_max_vcpus = MAX_VIRT_CPUS;
-
- dom0->vcpu = xmalloc_array(struct vcpu *, dom0_max_vcpus);
- if ( !dom0->vcpu )
- return NULL;
- memset(dom0->vcpu, 0, dom0_max_vcpus * sizeof(*dom0->vcpu));
- dom0->max_vcpus = dom0_max_vcpus;
-
- return alloc_vcpu(dom0, 0, 0);
-}
-
-void machine_restart(unsigned int delay_millisecs)
-{
- mdelay(delay_millisecs);
- console_start_sync();
- if (running_on_sim)
- printk ("machine_restart called. spinning...\n");
- else
- (*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
- while(1);
-}
-
-extern void cpu_halt(void);
-
-void machine_halt(void)
-{
- console_start_sync();
-
-#ifdef CONFIG_SMP
- smp_send_stop();
-#endif
-
- printk ("machine_halt called. spinning...\n");
- while(1);
-}
-
-void sync_local_execstate(void)
-{
-}
-
-void sync_vcpu_execstate(struct vcpu *v)
-{
-// __ia64_save_fpu(v->arch._thread.fph);
- // FIXME SMP: Anything else needed here for SMP?
-}
-
-/* This function is taken from xen/arch/x86/domain.c */
-long
-arch_do_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg)
-{
- long rc = 0;
-
- switch (cmd) {
- case VCPUOP_register_runstate_memory_area:
- {
- struct vcpu_register_runstate_memory_area area;
- struct vcpu_runstate_info runstate;
-
- rc = -EFAULT;
- if (copy_from_guest(&area, arg, 1))
- break;
-
- if (!guest_handle_okay(area.addr.h, 1))
- break;
-
- rc = 0;
- runstate_guest(v) = area.addr.h;
-
- if (v == current) {
- __copy_to_guest(runstate_guest(v), &v->runstate, 1);
- } else {
- vcpu_runstate_get(v, &runstate);
- __copy_to_guest(runstate_guest(v), &runstate, 1);
- }
-
- break;
- }
- default:
- rc = -ENOSYS;
- break;
- }
-
- return rc;
-}
-
-size_param("dom0_mem", dom0_size);
-
-/*
- * Helper function for the optimization stuff handling the identity mapping
- * feature.
- */
-static inline unsigned long
-optf_identity_mapping_cmd_to_flg(unsigned long cmd)
-{
- switch(cmd) {
- case XEN_IA64_OPTF_IDENT_MAP_REG7:
- return XEN_IA64_OPTF_IDENT_MAP_REG7_FLG;
- case XEN_IA64_OPTF_IDENT_MAP_REG4:
- return XEN_IA64_OPTF_IDENT_MAP_REG4_FLG;
- case XEN_IA64_OPTF_IDENT_MAP_REG5:
- return XEN_IA64_OPTF_IDENT_MAP_REG5_FLG;
- default:
- BUG();
- return 0;
- }
-
- /* NOTREACHED */
-}
-
-static inline void
-optf_set_identity_mapping(unsigned long* mask, struct identity_mapping* im,
- struct xen_ia64_opt_feature* f)
-{
- unsigned long flag = optf_identity_mapping_cmd_to_flg(f->cmd);
-
- if (f->on) {
- *mask |= flag;
- im->pgprot = f->pgprot;
- im->key = f->key;
- } else {
- *mask &= ~flag;
- im->pgprot = 0;
- im->key = 0;
- }
-}
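-
-/*
- * A minimal usage sketch (illustrative; field values hypothetical):
- * these helpers are driven by domain_opt_feature() below, which a
- * caller might reach with something like
- *
- *	struct xen_ia64_opt_feature f = {
- *		.cmd    = XEN_IA64_OPTF_IDENT_MAP_REG7,
- *		.on     = 1,
- *		.pgprot = pgprot_val,	// protection bits, hypothetical
- *		.key    = 0,
- *	};
- *	domain_opt_feature(d, &f);
- *
- * which sets XEN_IA64_OPTF_IDENT_MAP_REG7_FLG in optf->mask and records
- * pgprot/key in optf->im_reg7.
- */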
-
-/*
- * Switch an optimization feature on/off.
- * The vcpu must be paused to avoid racy access to opt_feature.
- */
-int
-domain_opt_feature(struct domain *d, struct xen_ia64_opt_feature* f)
-{
- struct opt_feature* optf = &d->arch.opt_feature;
- struct vcpu *v;
- long rc = 0;
-
- for_each_vcpu(d, v) {
- if (v != current)
- vcpu_pause(v);
- }
-
- switch (f->cmd) {
- case XEN_IA64_OPTF_IDENT_MAP_REG4:
- optf_set_identity_mapping(&optf->mask, &optf->im_reg4, f);
- break;
- case XEN_IA64_OPTF_IDENT_MAP_REG5:
- optf_set_identity_mapping(&optf->mask, &optf->im_reg5, f);
- break;
- case XEN_IA64_OPTF_IDENT_MAP_REG7:
- optf_set_identity_mapping(&optf->mask, &optf->im_reg7, f);
- break;
- default:
- printk("%s: unknown opt_feature: %ld\n", __func__, f->cmd);
- rc = -ENOSYS;
- break;
- }
-
- for_each_vcpu(d, v) {
- if (v != current)
- vcpu_unpause(v);
- }
-
- return rc;
-}
-
diff --git a/xen/arch/ia64/xen/faults.c b/xen/arch/ia64/xen/faults.c
deleted file mode 100644
index 75a0cb095d..0000000000
--- a/xen/arch/ia64/xen/faults.c
+++ /dev/null
@@ -1,835 +0,0 @@
-/*
- * Miscellaneous process/domain related routines
- *
- * Copyright (C) 2004 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- */
-
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <xen/errno.h>
-#include <xen/sched.h>
-#include <xen/smp.h>
-#include <asm/ptrace.h>
-#include <xen/delay.h>
-#include <xen/perfc.h>
-#include <xen/mm.h>
-
-#include <asm/system.h>
-#include <asm/processor.h>
-#include <xen/irq.h>
-#include <xen/event.h>
-#include <asm/privop.h>
-#include <asm/vcpu.h>
-#include <asm/ia64_int.h>
-#include <asm/dom_fw.h>
-#include <asm/vhpt.h>
-#include <asm/debugger.h>
-#include <asm/fpswa.h>
-#include <asm/bundle.h>
-#include <asm/asm-xsi-offsets.h>
-#include <asm/shadow.h>
-#include <asm/uaccess.h>
-#include <asm/p2m_entry.h>
-
-extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
-/* FIXME: where should these declarations live? */
-extern int ia64_hyperprivop(unsigned long, REGS *);
-extern IA64FAULT ia64_hypercall(struct pt_regs *regs);
-
-extern void do_ssc(unsigned long ssc, struct pt_regs *regs);
-
-// should never panic domain... if it does, stack may have been overrun
-static void check_bad_nested_interruption(unsigned long isr,
- struct pt_regs *regs,
- unsigned long vector)
-{
- struct vcpu *v = current;
-
- if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
- panic_domain(regs,
- "psr.dt off, trying to deliver nested dtlb!\n");
- }
- vector &= ~0xf;
- if (vector != IA64_DATA_TLB_VECTOR &&
- vector != IA64_ALT_DATA_TLB_VECTOR &&
- vector != IA64_VHPT_TRANS_VECTOR) {
- panic_domain(regs, "psr.ic off, delivering fault=%lx,"
- "ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
- vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa),
- isr, PSCB(v, iip));
- }
-}
-
-static void reflect_interruption(unsigned long isr, struct pt_regs *regs,
- unsigned long vector)
-{
- struct vcpu *v = current;
-
- if (!PSCB(v, interrupt_collection_enabled))
- check_bad_nested_interruption(isr, regs, vector);
- PSCB(v, unat) = regs->ar_unat; // not sure if this is really needed?
- PSCB(v, precover_ifs) = regs->cr_ifs;
- PSCB(v, ipsr) = vcpu_get_psr(v);
- vcpu_bsw0(v);
- PSCB(v, isr) = isr;
- PSCB(v, iip) = regs->cr_iip;
- PSCB(v, ifs) = 0;
-
- regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
- regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
- regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
- if (PSCB(v, dcr) & IA64_DCR_BE)
- regs->cr_ipsr |= IA64_PSR_BE;
- else
- regs->cr_ipsr &= ~IA64_PSR_BE;
-
- if (PSCB(v, hpsr_dfh))
- regs->cr_ipsr |= IA64_PSR_DFH;
- PSCB(v, vpsr_dfh) = 0;
- v->vcpu_info->evtchn_upcall_mask = 1;
- PSCB(v, interrupt_collection_enabled) = 0;
-
- perfc_incra(slow_reflect, vector >> 8);
-
- debugger_event(vector == IA64_EXTINT_VECTOR ?
- XEN_IA64_DEBUG_ON_EXTINT : XEN_IA64_DEBUG_ON_EXCEPT);
-}
-
-void reflect_event(void)
-{
- struct vcpu *v = current;
- struct pt_regs *regs;
- unsigned long isr;
-
- if (!event_pending(v))
- return;
-
- /* Sanity check */
- if (is_idle_vcpu(v)) {
- //printk("WARN: invocation to reflect_event in nested xen\n");
- return;
- }
-
- regs = vcpu_regs(v);
-
- isr = regs->cr_ipsr & IA64_PSR_RI;
-
- if (!PSCB(v, interrupt_collection_enabled))
- printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
- "isr=%lx,viip=0x%lx\n",
- regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
- PSCB(v, unat) = regs->ar_unat; // not sure if this is really needed?
- PSCB(v, precover_ifs) = regs->cr_ifs;
- PSCB(v, ipsr) = vcpu_get_psr(v);
- vcpu_bsw0(v);
- PSCB(v, isr) = isr;
- PSCB(v, iip) = regs->cr_iip;
- PSCB(v, ifs) = 0;
-
- regs->cr_iip = v->arch.event_callback_ip;
- regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
- regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
- if (PSCB(v, dcr) & IA64_DCR_BE)
- regs->cr_ipsr |= IA64_PSR_BE;
- else
- regs->cr_ipsr &= ~IA64_PSR_BE;
-
-
- if (PSCB(v, hpsr_dfh))
- regs->cr_ipsr |= IA64_PSR_DFH;
- PSCB(v, vpsr_dfh) = 0;
- v->vcpu_info->evtchn_upcall_mask = 1;
- PSCB(v, interrupt_collection_enabled) = 0;
-
- debugger_event(XEN_IA64_DEBUG_ON_EVENT);
-}
-
-static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
-{
- if (!PSCB(v, interrupt_collection_enabled)) {
- PSCB(v, ifs) = regs->cr_ifs;
- regs->cr_ifs = 0;
- perfc_incr(lazy_cover);
- return 1; // retry same instruction with cr.ifs off
- }
- return 0;
-}
-
-void ia64_do_page_fault(unsigned long address, unsigned long isr,
- struct pt_regs *regs, unsigned long itir)
-{
- unsigned long iip = regs->cr_iip, iha;
- // FIXME should validate address here
- unsigned long pteval;
- unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
- IA64FAULT fault;
- int is_ptc_l_needed = 0;
- ia64_itir_t _itir = {.itir = itir};
-
- if ((isr & IA64_ISR_SP)
- || ((isr & IA64_ISR_NA)
- && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
- /*
- * This fault was due to a speculative load or lfetch.fault,
- * set the "ed" bit in the psr to ensure forward progress.
- * (Target register will get a NaT for ld.s, lfetch will be
- * canceled.)
- */
- ia64_psr(regs)->ed = 1;
- return;
- }
-
- again:
- fault = vcpu_translate(current, address, is_data, &pteval,
- &itir, &iha);
- if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
- struct p2m_entry entry;
- unsigned long m_pteval;
- m_pteval = translate_domain_pte(pteval, address, itir,
- &(_itir.itir), &entry);
- vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
- m_pteval, pteval, _itir.itir, &entry);
- if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
- p2m_entry_retry(&entry)) {
-			/* The dtlb has been purged in the meantime, and this
-			   dtlb entry was the matching one.  Undo the work.  */
-			vcpu_flush_tlb_vhpt_range(address, _itir.ps);
-
-			// The stale entry which we inserted above
-			// may remain in the TLB cache.
-			// We don't purge it now, hoping the next itc purges it.
- is_ptc_l_needed = 1;
- goto again;
- }
- return;
- }
-
- if (is_ptc_l_needed)
- vcpu_ptc_l(current, address, _itir.ps);
- if (!guest_mode(regs)) {
- /* The fault occurs inside Xen. */
- if (!ia64_done_with_exception(regs)) {
- // should never happen. If it does, region 0 addr may
- // indicate a bad xen pointer
- printk("*** xen_handle_domain_access: exception table"
- " lookup failed, iip=0x%lx, addr=0x%lx, "
- "spinning...\n", iip, address);
- panic_domain(regs, "*** xen_handle_domain_access: "
- "exception table lookup failed, "
- "iip=0x%lx, addr=0x%lx, spinning...\n",
- iip, address);
- }
- return;
- }
-
- if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
- return;
-
- if (!PSCB(current, interrupt_collection_enabled)) {
- check_bad_nested_interruption(isr, regs, fault);
- //printk("Delivering NESTED DATA TLB fault\n");
- fault = IA64_DATA_NESTED_TLB_VECTOR;
- regs->cr_iip =
- ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
- regs->cr_ipsr =
- (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
- regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,
- IA64_PSR_CPL0_BIT);
- if (PSCB(current, dcr) & IA64_DCR_BE)
- regs->cr_ipsr |= IA64_PSR_BE;
- else
- regs->cr_ipsr &= ~IA64_PSR_BE;
-
-
- if (PSCB(current, hpsr_dfh))
- regs->cr_ipsr |= IA64_PSR_DFH;
- PSCB(current, vpsr_dfh) = 0;
- perfc_incra(slow_reflect, fault >> 8);
- return;
- }
-
- PSCB(current, itir) = itir;
- PSCB(current, iha) = iha;
- PSCB(current, ifa) = address;
- reflect_interruption(isr, regs, fault);
-}
-
-fpswa_interface_t *fpswa_interface = 0;
-
-void __init trap_init(void)
-{
- if (ia64_boot_param->fpswa)
- /* FPSWA fixup: make the interface pointer a virtual address */
- fpswa_interface = __va(ia64_boot_param->fpswa);
- else
- printk("No FPSWA supported.\n");
-}
-
-static fpswa_ret_t
-fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
- unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
- unsigned long *ifs, struct pt_regs *regs)
-{
- fp_state_t fp_state;
- fpswa_ret_t ret;
- XEN_EFI_RR_DECLARE(rr6, rr7);
-
- if (!fpswa_interface)
- return (fpswa_ret_t) {-1, 0, 0, 0};
-
- memset(&fp_state, 0, sizeof(fp_state_t));
-
- /*
- * compute fp_state. only FP registers f6 - f11 are used by the
- * kernel, so set those bits in the mask and set the low volatile
- * pointer to point to these registers.
- */
- fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
-
- fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
- /*
- * unsigned long (*EFI_FPSWA) (
- * unsigned long trap_type,
- * void *Bundle,
- * unsigned long *pipsr,
- * unsigned long *pfsr,
- * unsigned long *pisr,
- * unsigned long *ppreds,
- * unsigned long *pifs,
- * void *fp_state);
- */
- XEN_EFI_RR_ENTER(rr6, rr7);
- ret = (*fpswa_interface->fpswa) (fp_fault, bundle,
- ipsr, fpsr, isr, pr, ifs, &fp_state);
- XEN_EFI_RR_LEAVE(rr6, rr7);
-
- return ret;
-}
-
-/*
- * Handle floating-point assist faults and traps for domain.
- */
-unsigned long
-handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
-{
- IA64_BUNDLE bundle;
- unsigned long fault_ip;
- fpswa_ret_t ret;
- unsigned long rc;
-
- fault_ip = regs->cr_iip;
- /*
- * When the FP trap occurs, the trapping instruction is completed.
-	 * If ipsr.ri == 0, the trapping instruction is in the previous
-	 * bundle.
- */
- if (!fp_fault && (ia64_psr(regs)->ri == 0))
- fault_ip -= 16;
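-	/* (A bundle is 16 bytes; the trap leaves iip pointing at the next
-	   instruction, so ri == 0 means the completed instruction was in
-	   the previous bundle, hence the 16-byte step back.) */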
-
- if (VMX_DOMAIN(current)) {
- rc = __vmx_get_domain_bundle(fault_ip, &bundle);
- } else {
- rc = 0;
- if (vcpu_get_domain_bundle(current, regs, fault_ip,
- &bundle) == 0)
- rc = IA64_RETRY;
- }
- if (rc == IA64_RETRY) {
- PSCBX(current, fpswa_ret) = (fpswa_ret_t){IA64_RETRY, 0, 0, 0};
- gdprintk(XENLOG_DEBUG,
- "%s(%s): floating-point bundle at 0x%lx not mapped\n",
- __FUNCTION__, fp_fault ? "fault" : "trap", fault_ip);
- return IA64_RETRY;
- }
-
- ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
- &isr, &regs->pr, &regs->cr_ifs, regs);
-
- if (ret.status) {
- PSCBX(current, fpswa_ret) = ret;
- gdprintk(XENLOG_ERR, "%s(%s): fp_emulate() returned %ld\n",
- __FUNCTION__, fp_fault ? "fault" : "trap",
- ret.status);
- }
-
- return ret.status;
-}
-
-void
-ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
- unsigned long iim, unsigned long itir, unsigned long arg5,
- unsigned long arg6, unsigned long arg7, unsigned long stack)
-{
- struct pt_regs *regs = (struct pt_regs *)&stack;
- unsigned long code;
- static const char *const reason[] = {
- "IA-64 Illegal Operation fault",
- "IA-64 Privileged Operation fault",
- "IA-64 Privileged Register fault",
- "IA-64 Reserved Register/Field fault",
- "Disabled Instruction Set Transition fault",
- "Unknown fault 5", "Unknown fault 6",
- "Unknown fault 7", "Illegal Hazard fault",
- "Unknown fault 9", "Unknown fault 10",
- "Unknown fault 11", "Unknown fault 12",
- "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
- };
-
- printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, "
- "ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa,
- regs->cr_iip, regs->cr_ipsr, isr);
-
- if ((isr & IA64_ISR_NA) &&
- ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
- /*
- * This fault was due to lfetch.fault, set "ed" bit in the
- * psr to cancel the lfetch.
- */
- ia64_psr(regs)->ed = 1;
- printk("ia64_fault: handled lfetch.fault\n");
- return;
- }
-
- switch (vector) {
- case 0:
- printk("VHPT Translation.\n");
- break;
-
- case 4:
- printk("Alt DTLB.\n");
- break;
-
- case 6:
- printk("Instruction Key Miss.\n");
- break;
-
- case 7:
- printk("Data Key Miss.\n");
- break;
-
- case 8:
- printk("Dirty-bit.\n");
- break;
-
- case 10:
- /* __domain_get_bundle() may cause fault. */
- if (ia64_done_with_exception(regs))
- return;
- printk("Data Access-bit.\n");
- break;
-
- case 20:
- printk("Page Not Found.\n");
- break;
-
- case 21:
- printk("Key Permission.\n");
- break;
-
- case 22:
- printk("Instruction Access Rights.\n");
- break;
-
- case 24: /* General Exception */
- code = (isr >> 4) & 0xf;
- printk("General Exception: %s%s.\n", reason[code],
- (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
- " (data access)") : "");
- if (code == 8) {
-#ifdef CONFIG_IA64_PRINT_HAZARDS
- printk("%s[%d]: possible hazard @ ip=%016lx "
- "(pr = %016lx)\n", current->comm, current->pid,
- regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
-#endif
- printk("ia64_fault: returning on hazard\n");
- return;
- }
- break;
-
- case 25:
- printk("Disabled FP-Register.\n");
- break;
-
- case 26:
- printk("NaT consumption.\n");
- break;
-
- case 29:
- printk("Debug.\n");
- break;
-
- case 30:
- printk("Unaligned Reference.\n");
- break;
-
- case 31:
- printk("Unsupported data reference.\n");
- break;
-
- case 32:
- printk("Floating-Point Fault.\n");
- break;
-
- case 33:
- printk("Floating-Point Trap.\n");
- break;
-
- case 34:
- printk("Lower Privilege Transfer Trap.\n");
- break;
-
- case 35:
- printk("Taken Branch Trap.\n");
- break;
-
- case 36:
- printk("Single Step Trap.\n");
- break;
-
- case 45:
- printk("IA-32 Exception.\n");
- break;
-
- case 46:
- printk("IA-32 Intercept.\n");
- break;
-
- case 47:
- printk("IA-32 Interrupt.\n");
- break;
-
- default:
- printk("Fault %lu\n", vector);
- break;
- }
-
- show_registers(regs);
- panic("Fault in Xen.\n");
-}
-
-/* Also read in hyperprivop.S */
-int first_break = 0;
-
-void
-ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
- unsigned long iim)
-{
- struct domain *d = current->domain;
- struct vcpu *v = current;
- IA64FAULT vector;
-
- /* FIXME: don't hardcode constant */
- if ((iim == 0x80001 || iim == 0x80002)
- && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
- do_ssc(vcpu_get_gr(current, 36), regs);
- }
-#ifdef CRASH_DEBUG
- else if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs)) {
- if (iim == 0)
- show_registers(regs);
- debugger_trap_fatal(0 /* don't care */ , regs);
- regs_increment_iip(regs);
- }
-#endif
- else if (iim == d->arch.breakimm &&
- ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
- /* by default, do not continue */
- v->arch.hypercall_continuation = 0;
-
- if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
- if (!PSCBX(v, hypercall_continuation))
- vcpu_increment_iip(current);
- } else
- reflect_interruption(isr, regs, vector);
- } else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
- && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
- if (ia64_hyperprivop(iim, regs))
- vcpu_increment_iip(current);
- } else {
- if (iim == 0)
- die_if_kernel("bug check", regs, iim);
- PSCB(v, iim) = iim;
- reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
- }
-}
-
-void
-ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
- unsigned long itir)
-{
- IA64FAULT vector;
-
- vector = priv_emulate(current, regs, isr);
- if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
- // Note: if a path results in a vector to reflect that requires
- // iha/itir (e.g. vcpu_force_data_miss), they must be set there
- /*
- * IA64_GENEX_VECTOR may contain in the lowest byte an ISR.code
- * see IA64_ILLOP_FAULT, ...
- */
- if ((vector & ~0xffUL) == IA64_GENEX_VECTOR) {
- isr = vector & 0xffUL;
- vector = IA64_GENEX_VECTOR;
- }
- reflect_interruption(isr, regs, vector);
- }
-}
-
-void
-ia64_lazy_load_fpu(struct vcpu *v)
-{
- if (PSCB(v, hpsr_dfh)) {
- PSCB(v, hpsr_dfh) = 0;
- PSCB(v, hpsr_mfh) = 1;
- if (__ia64_per_cpu_var(fp_owner) != v)
- __ia64_load_fpu(v->arch._thread.fph);
- }
-}
-
-void
-ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
- unsigned long isr, unsigned long iim,
- unsigned long vector)
-{
- struct vcpu *v = current;
- unsigned long check_lazy_cover = 0;
- unsigned long psr = regs->cr_ipsr;
- unsigned long status;
-
- /* Following faults shouldn't be seen from Xen itself */
- BUG_ON(!(psr & IA64_PSR_CPL));
-
- switch (vector) {
- case 6:
- vector = IA64_INST_KEY_MISS_VECTOR;
- break;
- case 7:
- vector = IA64_DATA_KEY_MISS_VECTOR;
- break;
- case 8:
- vector = IA64_DIRTY_BIT_VECTOR;
- break;
- case 9:
- vector = IA64_INST_ACCESS_BIT_VECTOR;
- break;
- case 10:
- check_lazy_cover = 1;
- vector = IA64_DATA_ACCESS_BIT_VECTOR;
- break;
- case 20:
- check_lazy_cover = 1;
- vector = IA64_PAGE_NOT_PRESENT_VECTOR;
- break;
- case 21:
- vector = IA64_KEY_PERMISSION_VECTOR;
- break;
- case 22:
- vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
- break;
- case 23:
- check_lazy_cover = 1;
- vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
- break;
- case 24:
- vector = IA64_GENEX_VECTOR;
- break;
- case 25:
- ia64_lazy_load_fpu(v);
- if (!PSCB(v, vpsr_dfh)) {
- regs->cr_ipsr &= ~IA64_PSR_DFH;
- return;
- }
- vector = IA64_DISABLED_FPREG_VECTOR;
- break;
- case 26:
- if (((isr >> 4L) & 0xfL) == 1) {
- /* Fault is due to a register NaT consumption fault. */
- //regs->eml_unat = 0; FIXME: DO WE NEED THIS??
- vector = IA64_NAT_CONSUMPTION_VECTOR;
- break;
- }
-#if 1
- // pass null pointer dereferences through with no error
- // but retain debug output for non-zero ifa
- if (!ifa) {
- vector = IA64_NAT_CONSUMPTION_VECTOR;
- break;
- }
-#endif
-#ifdef CONFIG_PRIVIFY
- /* Some privified operations are coded using reg+64 instead
- of reg. */
- printk("*** NaT fault... attempting to handle as privop\n");
- printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
- isr, ifa, regs->cr_iip, psr);
- //regs->eml_unat = 0; FIXME: DO WE NEED THIS???
- // certain NaT faults are higher priority than privop faults
- vector = priv_emulate(v, regs, isr);
- if (vector == IA64_NO_FAULT) {
- printk("*** Handled privop masquerading as NaT "
- "fault\n");
- return;
- }
-#endif
- vector = IA64_NAT_CONSUMPTION_VECTOR;
- break;
- case 27:
- //printk("*** Handled speculation vector, itc=%lx!\n",
- // ia64_get_itc());
- PSCB(current, iim) = iim;
- vector = IA64_SPECULATION_VECTOR;
- break;
- case 29:
- vector = IA64_DEBUG_VECTOR;
- if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_DEBUG))
- return;
- break;
- case 30:
- // FIXME: Should we handle unaligned refs in Xen??
- vector = IA64_UNALIGNED_REF_VECTOR;
- break;
- case 32:
- status = handle_fpu_swa(1, regs, isr);
- if (!status) {
- vcpu_increment_iip(v);
- return;
- }
- vector = IA64_FP_FAULT_VECTOR;
- break;
- case 33:
- status = handle_fpu_swa(0, regs, isr);
- if (!status)
- return;
- vector = IA64_FP_TRAP_VECTOR;
- break;
- case 34:
- if (isr & (1UL << 4))
- printk("ia64_handle_reflection: handling "
- "unimplemented instruction address %s\n",
- (isr & (1UL<<32)) ? "fault" : "trap");
- vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
- break;
- case 35:
- vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
- if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_TBRANCH))
- return;
- break;
- case 36:
- vector = IA64_SINGLE_STEP_TRAP_VECTOR;
- if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_SSTEP))
- return;
- break;
-
- default:
- panic_domain(regs, "ia64_handle_reflection: "
- "unhandled vector=0x%lx\n", vector);
- return;
- }
- if (check_lazy_cover && (isr & IA64_ISR_IR) &&
- handle_lazy_cover(v, regs))
- return;
- PSCB(current, ifa) = ifa;
- PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
- reflect_interruption(isr, regs, vector);
-}
-
-void
-ia64_shadow_fault(unsigned long ifa, unsigned long itir,
- unsigned long isr, struct pt_regs *regs)
-{
- struct vcpu *v = current;
- struct domain *d = current->domain;
- unsigned long gpfn;
- unsigned long pte = 0;
- struct vhpt_lf_entry *vlfe;
-
- /*
- * v->arch.vhpt_pg_shift shouldn't be used here.
-	 * Currently the dirty page logging bitmap is allocated based
-	 * on PAGE_SIZE; this is part of the xen_domctl_shadow_op ABI.
-	 * If we want to log dirty pages at a finer granularity when
-	 * v->arch.vhpt_pg_shift < PAGE_SHIFT, we have to
- * revise the ABI and update this function and the related
- * tool stack (live relocation).
- */
- unsigned long vhpt_pg_shift = PAGE_SHIFT;
-
- /* There are 2 jobs to do:
- - marking the page as dirty (the metaphysical address must be
- extracted to do that).
- - reflecting or not the fault (the virtual Dirty bit must be
- extracted to decide).
-	   Unfortunately this information is not immediately available!
- */
-
- /* Extract the metaphysical address.
- Try to get it from VHPT and M2P as we need the flags. */
- vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
- pte = vlfe->page_flags;
- if (vlfe->ti_tag == ia64_ttag(ifa)) {
- /* The VHPT entry is valid. */
- gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >>
- vhpt_pg_shift);
- BUG_ON(gpfn == INVALID_M2P_ENTRY);
- } else {
- unsigned long itir, iha;
- IA64FAULT fault;
-
- /* The VHPT entry is not valid. */
- vlfe = NULL;
-
- /* FIXME: gives a chance to tpa, as the TC was valid. */
-
- fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);
-
- /* Try again! */
- if (fault != IA64_NO_FAULT) {
- /* This will trigger a dtlb miss. */
- ia64_ptcl(ifa, vhpt_pg_shift << 2);
- return;
- }
- gpfn = ((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift);
- if (pte & _PAGE_D)
- pte |= _PAGE_VIRT_D;
- }
-
- /* Set the dirty bit in the bitmap. */
- shadow_mark_page_dirty(d, gpfn);
-
-	/* Update the local TC/VHPT and decide whether or not the fault should
-	   be reflected.
-	   SMP note: we almost ignore the other processors. The shadow_bitmap
-	   has been atomically updated. If the dirty fault happens on another
-	   processor, it will do its job.
-	*/
-
- if (pte != 0) {
- /* We will know how to handle the fault. */
-
- if (pte & _PAGE_VIRT_D) {
- /* Rewrite VHPT entry.
- There is no race here because only the
- cpu VHPT owner can write page_flags. */
- if (vlfe)
- vlfe->page_flags = pte | _PAGE_D;
-
- /* Purge the TC locally.
- It will be reloaded from the VHPT iff the
- VHPT entry is still valid. */
- ia64_ptcl(ifa, vhpt_pg_shift << 2);
-
- atomic64_inc(&d->arch.shadow_fault_count);
- } else {
- /* Reflect.
- In this case there is no need to purge. */
- ia64_handle_reflection(ifa, regs, isr, 0, 8);
- }
- } else {
-		/* We don't know whether or not the fault must be
-		   reflected.  The VHPT entry is not valid. */
- /* FIXME: in metaphysical mode, we could do an ITC now. */
- ia64_ptcl(ifa, vhpt_pg_shift << 2);
- }
-}
diff --git a/xen/arch/ia64/xen/flushd.S b/xen/arch/ia64/xen/flushd.S
deleted file mode 100644
index 76ac5f4b2a..0000000000
--- a/xen/arch/ia64/xen/flushd.S
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Cache flushing routines.
- *
- * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 05/28/05 Zoltan Menyhart Dynamic stride size
- * 03/31/06 Tristan Gingold copied and modified for dcache.
- */
-
-#include <asm/asmmacro.h>
-
-
- /*
- * flush_dcache_range(start,end)
- *
- * Flush cache.
- *
- * Must deal with range from start to end-1 but nothing else
- * (need to be careful not to touch addresses that may be
- * unmapped).
- *
- * Note: "in0" and "in1" are preserved for debugging purposes.
- */
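-	/*
-	 * A worked example (illustrative): with a 128-byte stride
-	 * (ia64_d_cache_stride_shift == 7), start = 0x1008 and end = 0x1100
-	 * give a last-byte address of 0x10ff, so r23 = 0x20 and r22 = 0x21;
-	 * the loop then issues two "fc" instructions, at 0x1000 and 0x1080.
-	 */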
-GLOBAL_ENTRY(flush_dcache_range)
-
- .prologue
- alloc r2=ar.pfs,2,0,0,0
- movl r3=ia64_d_cache_stride_shift
- mov r21=1
- ;;
- ld8 r20=[r3] // r20: stride shift
- sub r22=in1,r0,1 // last byte address
- ;;
- shr.u r23=in0,r20 // start / (stride size)
- shr.u r22=r22,r20 // (last byte address) / (stride size)
-	shl r21=r21,r20			// r21: stride size of the d-cache(s)
- ;;
- sub r8=r22,r23 // number of strides - 1
- shl r24=r23,r20 // r24: addresses for "fc" =
- // "start" rounded down to stride
- // boundary
- .save ar.lc,r3
- mov r3=ar.lc // save ar.lc
- ;;
-
- .body
- mov ar.lc=r8
- ;;
- /*
- * 32 byte aligned loop, even number of (actually 2) bundles
- */
-.Loop: fc r24 // issuable on M0 only
- add r24=r21,r24 // we flush "stride size" bytes per
- // iteration
- nop.i 0
- br.cloop.sptk.few .Loop
- ;;
- sync.i
- ;;
- srlz.i
- ;;
- mov ar.lc=r3 // restore ar.lc
- br.ret.sptk.many rp
-END(flush_dcache_range)
diff --git a/xen/arch/ia64/xen/flushtlb.c b/xen/arch/ia64/xen/flushtlb.c
deleted file mode 100644
index e8ff594384..0000000000
--- a/xen/arch/ia64/xen/flushtlb.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/******************************************************************************
- * flushtlb.c
- * based on x86 flushtlb.c
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <xen/sched.h>
-#include <xen/softirq.h>
-#include <asm/vcpu.h>
-#include <asm/vhpt.h>
-#include <asm/flushtlb.h>
-
-/* Debug builds: Wrap frequently to stress-test the wrap logic. */
-#ifdef NDEBUG
-#define WRAP_MASK (0xFFFFFFFFU)
-#else
-#define WRAP_MASK (0x000003FFU)
-#endif
-
-volatile u32 tlbflush_clock = 1U; /* 1 greater than tlbflush_time. */
-DEFINE_PER_CPU(volatile u32, tlbflush_time);
-
-u32
-tlbflush_clock_inc_and_return(void)
-{
- u32 t, t1, t2;
-
- t = tlbflush_clock;
- do {
- t1 = t2 = t;
- /* Clock wrapped: someone else is leading a global TLB shootdown. */
- if (unlikely(t1 == 0))
- return t2;
- t2 = (t + 1) & WRAP_MASK;
- t = ia64_cmpxchg(acq, &tlbflush_clock, t1, t2, sizeof(tlbflush_clock));
- } while (unlikely(t != t1));
-
- /* Clock wrapped: we will lead a global TLB shootdown. */
- if (unlikely(t2 == 0))
- raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
-
- return t2;
-}
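-
-/*
- * A minimal usage sketch (illustrative, not a quote of the callers): a
- * cpu about to flush grabs a fresh timestamp and publishes it once the
- * flush is done, e.g.
- *
- *	u32 t = tlbflush_clock_inc_and_return();
- *	local_flush_tlb_all();
- *	per_cpu(tlbflush_time, smp_processor_id()) = t;
- *
- * so that later readers can compare a recorded timestamp against each
- * cpu's tlbflush_time to decide whether a shootdown is still needed.
- */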
-
-static void
-tlbflush_clock_local_flush(void *unused)
-{
- local_vhpt_flush();
- local_flush_tlb_all();
-}
-
-void
-new_tlbflush_clock_period(void)
-{
-	/* Flush every physical cpu's VHPT and mTLB. */
- on_each_cpu(tlbflush_clock_local_flush, NULL, 1);
-
- /*
-	 * Now that the global TLB shootdown is finished, restart the clock
-	 * by incrementing tlbflush_clock.  An atomic operation isn't
-	 * necessary because we know that tlbflush_clock stays 0 while the
-	 * shootdown is in progress.
- */
- tlbflush_clock++;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/ia64/xen/fw_emul.c b/xen/arch/ia64/xen/fw_emul.c
deleted file mode 100644
index 397c336c98..0000000000
--- a/xen/arch/ia64/xen/fw_emul.c
+++ /dev/null
@@ -1,1622 +0,0 @@
-/*
- * fw_emul.c:
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-#include <xen/config.h>
-#include <asm/system.h>
-#include <asm/pgalloc.h>
-
-#include <linux/efi.h>
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/hubdev.h>
-#include <asm/xenmca.h>
-
-#include <public/sched.h>
-#include "hpsim_ssc.h"
-#include <asm/vcpu.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/dom_fw.h>
-#include <xen/guest_access.h>
-#include <xen/console.h>
-#include <xen/hypercall.h>
-#include <xen/softirq.h>
-#include <xen/time.h>
-#include <asm/debugger.h>
-#include <asm/vmx_phy_mode.h>
-
-static DEFINE_SPINLOCK(efi_time_services_lock);
-
-struct sal_mc_params {
- u64 param_type;
- u64 i_or_m;
- u64 i_or_m_val;
- u64 timeout;
- u64 rz_always;
-} sal_mc_params[SAL_MC_PARAM_CPE_INT + 1];
-
-struct sal_vectors {
- u64 vector_type;
- u64 handler_addr1;
- u64 gp1;
- u64 handler_len1;
- u64 handler_addr2;
- u64 gp2;
- u64 handler_len2;
-} sal_vectors[SAL_VECTOR_OS_BOOT_RENDEZ + 1];
-
-struct smp_call_args_t {
- u64 type;
- u64 ret;
- u64 target;
- struct domain *domain;
- int corrected;
- int status;
- void *data;
-};
-
-extern sal_log_record_header_t *sal_record;
-DEFINE_SPINLOCK(sal_record_lock);
-
-extern spinlock_t sal_queue_lock;
-
-#define IA64_SAL_NO_INFORMATION_AVAILABLE -5
-
-#if defined(IA64_SAL_DEBUG_INFO)
-static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
-
-# define IA64_SAL_DEBUG(fmt...) printk("sal_emulator: " fmt)
-#else
-# define IA64_SAL_DEBUG(fmt...)
-#endif
-
-void get_state_info_on(void *data) {
- struct smp_call_args_t *arg = data;
-	unsigned long flags;
-
- spin_lock_irqsave(&sal_record_lock, flags);
- memset(sal_record, 0, ia64_sal_get_state_info_size(arg->type));
- arg->ret = ia64_sal_get_state_info(arg->type, (u64 *)sal_record);
- IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) on CPU#%d returns %ld.\n",
- rec_name[arg->type], smp_processor_id(), arg->ret);
- if (arg->corrected) {
- sal_record->severity = sal_log_severity_corrected;
- IA64_SAL_DEBUG("%s: IA64_SAL_GET_STATE_INFO(SAL_INFO_TYPE_MCA)"
- " force\n", __FUNCTION__);
- }
- if (arg->ret > 0) {
- /*
- * Save current->domain and set to local(caller) domain for
- * xencomm_paddr_to_maddr() which calculates maddr from
- * paddr using mpa value of current->domain.
- */
- struct domain *save;
- save = current->domain;
- current->domain = arg->domain;
- if (xencomm_copy_to_guest((void*)arg->target,
- sal_record, arg->ret, 0)) {
- printk("SAL_GET_STATE_INFO can't copy to user!!!!\n");
- arg->status = IA64_SAL_NO_INFORMATION_AVAILABLE;
- arg->ret = 0;
- }
- /* Restore current->domain to saved value. */
- current->domain = save;
- }
- spin_unlock_irqrestore(&sal_record_lock, flags);
-}
-
-void clear_state_info_on(void *data) {
- struct smp_call_args_t *arg = data;
-
- arg->ret = ia64_sal_clear_state_info(arg->type);
- IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) on CPU#%d returns %ld.\n",
- rec_name[arg->type], smp_processor_id(), arg->ret);
-
-}
-
-struct sal_ret_values
-sal_emulator (long index, unsigned long in1, unsigned long in2,
- unsigned long in3, unsigned long in4, unsigned long in5,
- unsigned long in6, unsigned long in7)
-{
- struct ia64_sal_retval ret_stuff;
- unsigned long r9 = 0;
- unsigned long r10 = 0;
- long r11 = 0;
- long status;
-
- debugger_event(XEN_IA64_DEBUG_ON_SAL);
-
- status = 0;
- switch (index) {
- case SAL_FREQ_BASE:
- if (likely(!running_on_sim))
- status = ia64_sal_freq_base(in1,&r9,&r10);
- else switch (in1) {
- case SAL_FREQ_BASE_PLATFORM:
- r9 = 200000000;
- break;
-
- case SAL_FREQ_BASE_INTERVAL_TIMER:
- r9 = 700000000;
- break;
-
- case SAL_FREQ_BASE_REALTIME_CLOCK:
- r9 = 1;
- break;
-
- default:
- status = -1;
- break;
- }
- break;
- case SAL_PCI_CONFIG_READ:
- if (current->domain == dom0) {
- u64 value;
- // note that args 2&3 are swapped!!
- status = ia64_sal_pci_config_read(in1,in3,in2,&value);
- r9 = value;
- }
- else
- printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
- break;
- case SAL_PCI_CONFIG_WRITE:
- if (current->domain == dom0) {
- if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
- (in4 > 1) ||
- (in2 > 8) || (in2 & (in2-1)))
- printk("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
- in1,in4,in2,in3);
- // note that args are in a different order!!
- status = ia64_sal_pci_config_write(in1,in4,in2,in3);
- }
- else
- printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
- break;
- case SAL_SET_VECTORS:
- if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
- if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
- /* Sanity check: cs_length1 must be 0,
- second vector is reserved. */
- status = -2;
- }
- else {
- struct domain *d = current->domain;
- d->arch.sal_data->boot_rdv_ip = in2;
- d->arch.sal_data->boot_rdv_r1 = in3;
- }
- }
- else if (current->domain == dom0) {
- if (in1 >
- sizeof(sal_vectors)/sizeof(sal_vectors[0])-1) {
- gdprintk(XENLOG_DEBUG,
- "SAL_SET_VECTORS invalid in1 %ld\n",
- in1);
- status = -2;
- break;
- }
- sal_vectors[in1].vector_type = in1;
- sal_vectors[in1].handler_addr1 = in2;
- sal_vectors[in1].gp1 = in3;
- sal_vectors[in1].handler_len1 = in4;
- sal_vectors[in1].handler_addr2 = in5;
- sal_vectors[in1].gp2 = in6;
- sal_vectors[in1].handler_len2 = in7;
- } else {
- gdprintk(XENLOG_DEBUG, "NON-PRIV DOMAIN CALLED "
- "SAL_SET_VECTORS %ld\n", in1);
- /*
- * status = -2;
-			 * Temporary workaround until gfw support:
-			 * Windows 2003 SP2/SP1 dislikes -2 and crashes.
- */
- status = 0;
- }
- break;
- case SAL_GET_STATE_INFO:
- if (current->domain == dom0) {
- sal_queue_entry_t *e;
- unsigned long flags;
- struct smp_call_args_t arg;
-
- spin_lock_irqsave(&sal_queue_lock, flags);
- if (!sal_queue || list_empty(&sal_queue[in1])) {
- sal_log_record_header_t header;
- XEN_GUEST_HANDLE(void) handle =
- *(XEN_GUEST_HANDLE(void)*)&in3;
-
- IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
- "no sal_queue entry found.\n",
- rec_name[in1]);
- memset(&header, 0, sizeof(header));
-
- if (copy_to_guest(handle, &header, 1)) {
- printk("sal_emulator: "
- "SAL_GET_STATE_INFO can't copy "
- "empty header to user: 0x%lx\n",
- in3);
- }
- status = IA64_SAL_NO_INFORMATION_AVAILABLE;
- r9 = 0;
- spin_unlock_irqrestore(&sal_queue_lock, flags);
- break;
- }
- e = list_entry(sal_queue[in1].next,
- sal_queue_entry_t, list);
-
- list_del(&e->list);
- spin_unlock_irqrestore(&sal_queue_lock, flags);
-
- IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
- "on CPU#%d.\n",
- rec_name[e->sal_info_type],
- rec_name[in1], e->cpuid);
-
- arg.type = e->sal_info_type;
- arg.target = in3;
- arg.corrected = !!((in1 != e->sal_info_type) &&
- (e->sal_info_type == SAL_INFO_TYPE_MCA));
- arg.domain = current->domain;
- arg.status = 0;
-
- if (e->cpuid == smp_processor_id()) {
- IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
- get_state_info_on(&arg);
- } else {
- int ret;
- IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
- ret = smp_call_function_single(e->cpuid,
- get_state_info_on,
- &arg, 1);
- if (ret < 0) {
- printk("SAL_GET_STATE_INFO "
- "smp_call_function_single error:"
- " %d\n", ret);
- arg.ret = 0;
- arg.status =
- IA64_SAL_NO_INFORMATION_AVAILABLE;
- }
- }
- r9 = arg.ret;
- status = arg.status;
- if (r9 != 0) {
- /* Re-add the entry to sal_queue */
- spin_lock_irqsave(&sal_queue_lock, flags);
- list_add(&e->list, &sal_queue[in1]);
- spin_unlock_irqrestore(&sal_queue_lock, flags);
- }
- } else {
- status = IA64_SAL_NO_INFORMATION_AVAILABLE;
- r9 = 0;
- }
- break;
- case SAL_GET_STATE_INFO_SIZE:
- r9 = ia64_sal_get_state_info_size(in1);
- break;
- case SAL_CLEAR_STATE_INFO:
- if (current->domain == dom0) {
- sal_queue_entry_t *e;
- unsigned long flags;
- struct smp_call_args_t arg;
-
- spin_lock_irqsave(&sal_queue_lock, flags);
- if (list_empty(&sal_queue[in1])) {
- IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) "
- "no sal_queue entry found.\n",
- rec_name[in1]);
- status = IA64_SAL_NO_INFORMATION_AVAILABLE;
- r9 = 0;
- spin_unlock_irqrestore(&sal_queue_lock, flags);
- break;
- }
- e = list_entry(sal_queue[in1].next,
- sal_queue_entry_t, list);
-
- list_del(&e->list);
- spin_unlock_irqrestore(&sal_queue_lock, flags);
-
- IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s <= %s) "
- "on CPU#%d.\n",
- rec_name[e->sal_info_type],
- rec_name[in1], e->cpuid);
-
- arg.type = e->sal_info_type;
- arg.status = 0;
-
- if (e->cpuid == smp_processor_id()) {
- IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: local\n");
- clear_state_info_on(&arg);
- } else {
- int ret;
- IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
- ret = smp_call_function_single(e->cpuid,
- clear_state_info_on, &arg, 1);
- if (ret < 0) {
- printk("sal_emulator: "
- "SAL_CLEAR_STATE_INFO "
- "smp_call_function_single error:"
- " %d\n", ret);
- arg.ret = 0;
- arg.status =
- IA64_SAL_NO_INFORMATION_AVAILABLE;
- }
- }
- r9 = arg.ret;
- status = arg.status;
- if (r9 >= 0) {
- IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: more errors are available\n");
- spin_lock_irqsave(&sal_queue_lock, flags);
- list_add(&e->list, &sal_queue[in1]);
- spin_unlock_irqrestore(&sal_queue_lock, flags);
- }
- }
- break;
- case SAL_MC_RENDEZ:
- printk("*** CALLED SAL_MC_RENDEZ. IGNORED...\n");
- break;
- case SAL_MC_SET_PARAMS:
- if (current->domain == dom0) {
- if (in1 >
- sizeof(sal_mc_params) / sizeof(sal_mc_params[0])) {
- gdprintk(XENLOG_DEBUG,
- "SAL_MC_SET_PARAMS invalid in1 %ld\n",
- in1);
- status = -2;
- break;
- }
- sal_mc_params[in1].param_type = in1;
- sal_mc_params[in1].i_or_m = in2;
- sal_mc_params[in1].i_or_m_val = in3;
- sal_mc_params[in1].timeout = in4;
- sal_mc_params[in1].rz_always = in5;
- } else {
- gdprintk(XENLOG_DEBUG,
- "*** CALLED SAL_MC_SET_PARAMS. IGNORED...\n");
- /*
- * status = -1;
-			 * Temporary workaround until gfw support:
-			 * Windows 2003 SP2/SP1 dislikes -1 (not implemented)
-			 * and crashes.
- */
- status = 0;
- }
- break;
- case SAL_CACHE_FLUSH:
- if (1) {
- /* Flush using SAL.
- This method is faster but has a side effect on
-			   other vcpus running on this cpu. */
- status = ia64_sal_cache_flush (in1);
- }
- else {
-			/* Flush the whole domain with fc.
- This method is slower but has no side effects. */
- domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
- status = 0;
- }
- break;
- case SAL_CACHE_INIT:
- printk("*** CALLED SAL_CACHE_INIT. IGNORED...\n");
- break;
- case SAL_UPDATE_PAL:
- printk("*** CALLED SAL_UPDATE_PAL. IGNORED...\n");
- break;
- case SAL_PHYSICAL_ID_INFO:
- status = -1;
- break;
- case SAL_XEN_SAL_RETURN:
- if (!test_and_set_bit(_VPF_down, &current->pause_flags))
- vcpu_sleep_nosync(current);
- break;
- case SN_SAL_GET_MASTER_NASID:
- status = -1;
- if (current->domain == dom0) {
- /* printk("*** Emulating SN_SAL_GET_MASTER_NASID ***\n"); */
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_MASTER_NASID,
- 0, 0, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
- case SN_SAL_GET_KLCONFIG_ADDR:
- status = -1;
- if (current->domain == dom0) {
- /* printk("*** Emulating SN_SAL_GET_KLCONFIG_ADDR ***\n"); */
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR,
- in1, 0, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
- case SN_SAL_GET_SAPIC_INFO:
- status = -1;
- if (current->domain == dom0) {
- /* printk("*** Emulating SN_SAL_GET_SAPIC_INFO ***\n"); */
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO,
- in1, 0, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
- case SN_SAL_GET_SN_INFO:
- status = -1;
- if (current->domain == dom0) {
- /* printk("*** Emulating SN_SAL_GET_SN_INFO ***\n"); */
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO,
- in1, 0, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
- case SN_SAL_IOIF_GET_HUBDEV_INFO:
- status = -1;
- if (current->domain == dom0) {
- /* printk("*** Emulating SN_SAL_IOIF_GET_HUBDEV_INFO ***\n"); */
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_IOIF_GET_HUBDEV_INFO,
- in1, in2, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
- case SN_SAL_IOIF_INIT:
- status = -1;
- if (current->domain == dom0) {
- /* printk("*** Emulating SN_SAL_IOIF_INIT ***\n"); */
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_IOIF_INIT,
- 0, 0, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
- case SN_SAL_GET_PROM_FEATURE_SET:
- status = -1;
- if (current->domain == dom0) {
- /* printk("*** Emulating SN_SAL_GET_PROM_FEATURE_SET ***\n"); */
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_PROM_FEATURE_SET,
- in1, 0, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
- case SN_SAL_SET_OS_FEATURE_SET:
- status = -1;
- if (current->domain == dom0) {
- /* printk("*** Emulating SN_SAL_SET_OS_FEATURE_SET ***\n"); */
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SET_OS_FEATURE_SET,
- in1, 0, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
- case SN_SAL_SET_ERROR_HANDLING_FEATURES:
- status = -1;
- if (current->domain == dom0) {
- /* printk("*** Emulating SN_SAL_SET_ERROR_HANDLING_FEATURES ***\n"); */
- SAL_CALL_NOLOCK(ret_stuff,
- SN_SAL_SET_ERROR_HANDLING_FEATURES,
- in1, 0, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
-#if 0
-/*
- * Somehow ACPI breaks if allowing this one
- */
- case SN_SAL_SET_CPU_NUMBER:
- status = -1;
- if (current->domain == dom0) {
- printk("*** Emulating SN_SAL_SET_CPU_NUMBER ***\n");
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SET_CPU_NUMBER,
- in1, 0, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
-#endif
- case SN_SAL_LOG_CE:
- status = -1;
- if (current->domain == dom0) {
- static int log_ce = 0;
- if (!log_ce) {
- printk("*** Emulating SN_SAL_LOG_CE *** "
- " this will only be printed once\n");
- log_ce = 1;
- }
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE,
- 0, 0, 0, 0, 0, 0, 0);
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
- case SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST:
- status = -1;
- if (current->domain == dom0) {
- struct sn_flush_device_common flush;
- int flush_size;
-
- flush_size = sizeof(struct sn_flush_device_common);
- memset(&flush, 0, flush_size);
- SAL_CALL_NOLOCK(ret_stuff,
- SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
- in1, in2, in3, &flush, 0, 0, 0);
-#if 0
- printk("*** Emulating "
- "SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST ***\n");
-#endif
- if (ret_stuff.status == SALRET_OK) {
- XEN_GUEST_HANDLE(void) handle =
- *(XEN_GUEST_HANDLE(void)*)&in4;
- if (copy_to_guest(handle, &flush, 1)) {
- printk("SN_SAL_IOIF_GET_DEVICE_"
- "DMAFLUSH_LIST can't copy "
- "to user!\n");
- ret_stuff.status = SALRET_ERROR;
- }
- }
-
- status = ret_stuff.status;
- r9 = ret_stuff.v0;
- r10 = ret_stuff.v1;
- r11 = ret_stuff.v2;
- }
- break;
- default:
- printk("*** CALLED SAL_ WITH UNKNOWN INDEX (%lx). "
- "IGNORED...\n", index);
- status = -1;
- break;
- }
- return ((struct sal_ret_values) {status, r9, r10, r11});
-}
-
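-/*
- * Copy hypervisor data out to a guest virtual address.  For VMX
- * domains the address is first translated through the vTLB/VHPT and
- * handed to xencomm; for paravirtualized domains a range-checked
- * copy_to_user() suffices.
- */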
-static int
-safe_copy_to_guest(unsigned long to, void *from, long size)
-{
- BUG_ON((unsigned)size > PAGE_SIZE);
-
- if (VMX_DOMAIN(current)) {
- if (is_virtual_mode(current)) {
- thash_data_t *data;
- unsigned long gpa, poff;
-
- /* The caller must provide a DTR or DTC mapping */
- data = vtlb_lookup(current, to, DSIDE_TLB);
- if (data) {
- gpa = data->page_flags & _PAGE_PPN_MASK;
- } else {
- data = vhpt_lookup(to);
- if (!data)
- return -1;
- gpa = __mpa_to_gpa(
- data->page_flags & _PAGE_PPN_MASK);
- gpa &= _PAGE_PPN_MASK;
- }
- poff = POFFSET(to, data->ps);
- if (poff + size > PSIZE(data->ps))
- return -1;
- to = PAGEALIGN(gpa, data->ps) | poff;
- }
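- /* 'to' is now a guest physical address; the inline flag tells
- * xencomm to treat it as such rather than as a descriptor list. */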
- to |= XENCOMM_INLINE_FLAG;
- if (xencomm_copy_to_guest((void *)to, from, size, 0) != 0)
- return -1;
- return 0;
- } else {
- /* check for vulnerability */
- if (IS_VMM_ADDRESS(to) || IS_VMM_ADDRESS(to + size - 1))
- panic_domain(NULL, "copy to bad address:0x%lx\n", to);
- return copy_to_user((void __user *)to, from, size);
- }
-}
-
-cpumask_t cpu_cache_coherent_map;
-
-struct cache_flush_args {
- u64 cache_type;
- u64 operation;
- u64 progress;
- long status;
-};
-
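-/* Run on each remote CPU via smp_call_function(): flush that CPU's
- * caches and record a failing status, if any. */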
-static void
-remote_pal_cache_flush(void *v)
-{
- struct cache_flush_args *args = v;
- long status;
- u64 progress = args->progress;
-
- status = ia64_pal_cache_flush(args->cache_type, args->operation,
- &progress, NULL);
- if (status != 0)
- args->status = status;
-}
-
-static void
-remote_pal_prefetch_visibility(void *v)
-{
- s64 trans_type = (s64)v;
- ia64_pal_prefetch_visibility(trans_type);
-}
-
-static void
-remote_pal_mc_drain(void *v)
-{
- ia64_pal_mc_drain();
-}
-
-struct ia64_pal_retval
-xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
-{
- unsigned long r9 = 0;
- unsigned long r10 = 0;
- unsigned long r11 = 0;
- long status = PAL_STATUS_UNIMPLEMENTED;
- unsigned long flags;
- int processor;
-
- if (unlikely(running_on_sim))
- return pal_emulator_static(index);
-
- debugger_event(XEN_IA64_DEBUG_ON_PAL);
-
- // pal code must be mapped by a TR when pal is called, however
- // calls are rare enough that we will map it lazily rather than
- // at every context switch
- //efi_map_pal_code();
- switch (index) {
- case PAL_MEM_ATTRIB:
- status = ia64_pal_mem_attrib(&r9);
- break;
- case PAL_FREQ_BASE:
- status = ia64_pal_freq_base(&r9);
- if (status == PAL_STATUS_UNIMPLEMENTED) {
- status = ia64_sal_freq_base(0, &r9, &r10);
- r10 = 0;
- }
- break;
- case PAL_PROC_GET_FEATURES:
- status = ia64_pal_proc_get_features(&r9,&r10,&r11);
- break;
- case PAL_BUS_GET_FEATURES:
- status = ia64_pal_bus_get_features(
- (pal_bus_features_u_t *) &r9,
- (pal_bus_features_u_t *) &r10,
- (pal_bus_features_u_t *) &r11);
- break;
- case PAL_FREQ_RATIOS:
- status = ia64_pal_freq_ratios(
- (struct pal_freq_ratio *) &r9,
- (struct pal_freq_ratio *) &r10,
- (struct pal_freq_ratio *) &r11);
- break;
- case PAL_PTCE_INFO:
- /*
- * return hard-coded xen-specific values because ptc.e
- * is emulated on xen to always flush everything
- * these values result in only one ptc.e instruction
- */
- status = PAL_STATUS_SUCCESS;
- r10 = (1L << 32) | 1L;
- break;
- case PAL_VERSION:
- status = ia64_pal_version(
- (pal_version_u_t *) &r9,
- (pal_version_u_t *) &r10);
- break;
- case PAL_VM_PAGE_SIZE:
- status = ia64_pal_vm_page_size(&r9,&r10);
- break;
- case PAL_DEBUG_INFO:
- status = ia64_pal_debug_info(&r9,&r10);
- break;
- case PAL_CACHE_SUMMARY:
- status = ia64_pal_cache_summary(&r9,&r10);
- break;
- case PAL_VM_SUMMARY:
- if (VMX_DOMAIN(current)) {
- pal_vm_info_1_u_t v1;
- pal_vm_info_2_u_t v2;
- status = ia64_pal_vm_summary((pal_vm_info_1_u_t *)&v1,
- (pal_vm_info_2_u_t *)&v2);
- v1.pal_vm_info_1_s.max_itr_entry = NITRS - 1;
- v1.pal_vm_info_1_s.max_dtr_entry = NDTRS - 1;
- v2.pal_vm_info_2_s.impl_va_msb -= 1;
- v2.pal_vm_info_2_s.rid_size =
- current->domain->arch.rid_bits;
- r9 = v1.pvi1_val;
- r10 = v2.pvi2_val;
- } else {
- /* Use xen-specific values.
- hash_tag_id is somewhat random! */
- static const pal_vm_info_1_u_t v1 =
- {.pal_vm_info_1_s =
- { .vw = 1,
- .phys_add_size = 44,
- .key_size = 16,
- .max_pkr = XEN_IA64_NPKRS,
- .hash_tag_id = 0x30,
- .max_dtr_entry = NDTRS - 1,
- .max_itr_entry = NITRS - 1,
- .max_unique_tcs = 3,
- .num_tc_levels = 2
- }};
- pal_vm_info_2_u_t v2;
- v2.pvi2_val = 0;
- v2.pal_vm_info_2_s.rid_size =
- current->domain->arch.rid_bits;
- v2.pal_vm_info_2_s.impl_va_msb = 50;
- r9 = v1.pvi1_val;
- r10 = v2.pvi2_val;
- status = PAL_STATUS_SUCCESS;
- }
- break;
- case PAL_VM_INFO:
- if (VMX_DOMAIN(current)) {
- status = ia64_pal_vm_info(in1, in2,
- (pal_tc_info_u_t *)&r9, &r10);
- break;
- }
- if (in1 == 0 && in2 == 2) {
- /* Level 1: VHPT */
- const pal_tc_info_u_t v =
- { .pal_tc_info_s = {.num_sets = 128,
- .associativity = 1,
- .num_entries = 128,
- .pf = 1,
- .unified = 1,
- .reduce_tr = 0,
- .reserved = 0}};
- r9 = v.pti_val;
- /* Only support PAGE_SIZE tc. */
- r10 = PAGE_SIZE;
- status = PAL_STATUS_SUCCESS;
- }
- else if (in1 == 1 && (in2 == 1 || in2 == 2)) {
- /* Level 2: itlb/dtlb, 1 entry. */
- const pal_tc_info_u_t v =
- { .pal_tc_info_s = {.num_sets = 1,
- .associativity = 1,
- .num_entries = 1,
- .pf = 1,
- .unified = 0,
- .reduce_tr = 0,
- .reserved = 0}};
- r9 = v.pti_val;
- /* Only support PAGE_SIZE tc. */
- r10 = PAGE_SIZE;
- status = PAL_STATUS_SUCCESS;
- } else
- status = PAL_STATUS_EINVAL;
- break;
- case PAL_RSE_INFO:
- status = ia64_pal_rse_info(&r9, (pal_hints_u_t *)&r10);
- break;
- case PAL_REGISTER_INFO:
- status = ia64_pal_register_info(in1, &r9, &r10);
- break;
- case PAL_CACHE_FLUSH:
- if (in3 != 0) /* Initially progress_indicator must be 0 */
- panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
- "progress_indicator=%lx", in3);
-
- /* Always call Host Pal in int=0 */
- in2 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
-
- if (in1 != PAL_CACHE_TYPE_COHERENT) {
- struct cache_flush_args args = {
- .cache_type = in1,
- .operation = in2,
- .progress = 0,
- .status = 0
- };
- smp_call_function(remote_pal_cache_flush, &args, 1);
- if (args.status != 0)
- panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
- "remote status %lx", args.status);
- }
-
- /*
- * Call Host PAL cache flush
- * Clear psr.ic when call PAL_CACHE_FLUSH
- */
- r10 = in3;
- local_irq_save(flags);
- processor = current->processor;
- status = ia64_pal_cache_flush(in1, in2, &r10, &r9);
- local_irq_restore(flags);
-
- if (status != 0)
- panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
- "status %lx", status);
-
- if (in1 == PAL_CACHE_TYPE_COHERENT) {
- cpumask_complement(&current->arch.cache_coherent_map,
- cpumask_of(processor));
- cpumask_complement(&cpu_cache_coherent_map,
- cpumask_of(processor));
- }
- break;
- case PAL_PERF_MON_INFO:
- {
- unsigned long pm_buffer[16];
- status = ia64_pal_perf_mon_info(
- pm_buffer,
- (pal_perf_mon_info_u_t *) &r9);
- if (status != 0) {
- printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
- break;
- }
- if (safe_copy_to_guest(
- in1, pm_buffer, sizeof(pm_buffer))) {
- status = PAL_STATUS_EINVAL;
- goto fail_to_copy;
- }
- }
- break;
- case PAL_CACHE_INFO:
- {
- pal_cache_config_info_t ci;
- status = ia64_pal_cache_config_info(in1,in2,&ci);
- if (status != 0)
- break;
- r9 = ci.pcci_info_1.pcci1_data;
- r10 = ci.pcci_info_2.pcci2_data;
- }
- break;
- case PAL_VM_TR_READ: /* FIXME: vcpu_get_tr?? */
- printk("%s: PAL_VM_TR_READ unimplmented, ignored\n", __func__);
- break;
- case PAL_HALT_INFO:
- {
- /* 1000 cycles to enter/leave low power state,
- consumes 10 mW, implemented and cache/TLB coherent. */
- unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
- | (1UL << 61) | (1UL << 60);
- if (safe_copy_to_guest (in1, &res, sizeof (res))) {
- status = PAL_STATUS_EINVAL;
- goto fail_to_copy;
- }
- status = PAL_STATUS_SUCCESS;
- }
- break;
- case PAL_HALT:
- set_bit(_VPF_down, &current->pause_flags);
- vcpu_sleep_nosync(current);
- status = PAL_STATUS_SUCCESS;
- break;
- case PAL_HALT_LIGHT:
- if (VMX_DOMAIN(current)) {
- /* Called by VTI. */
- if (!is_unmasked_irq(current)) {
- do_sched_op_compat(SCHEDOP_block, 0);
- do_softirq();
- }
- status = PAL_STATUS_SUCCESS;
- }
- break;
- case PAL_PLATFORM_ADDR:
- if (VMX_DOMAIN(current))
- status = PAL_STATUS_SUCCESS;
- break;
- case PAL_FIXED_ADDR:
- status = PAL_STATUS_SUCCESS;
- r9 = current->vcpu_id;
- break;
- case PAL_PREFETCH_VISIBILITY:
- status = ia64_pal_prefetch_visibility(in1);
- if (status == 0) {
- /* must be performed on all remote processors
- in the coherence domain. */
- smp_call_function(remote_pal_prefetch_visibility,
- (void *)in1, 1);
- status = 1; /* no longer necessary on remote processors */
- }
- break;
- case PAL_MC_DRAIN:
- status = ia64_pal_mc_drain();
- /* FIXME: All vcpus likely call PAL_MC_DRAIN.
- That causes congestion. */
- smp_call_function(remote_pal_mc_drain, NULL, 1);
- break;
- case PAL_BRAND_INFO:
- if (in1 == 0) {
- char brand_info[128];
- status = ia64_pal_get_brand_info(brand_info);
- if (status != PAL_STATUS_SUCCESS)
- break;
- if (safe_copy_to_guest(in2, brand_info,
- sizeof(brand_info))) {
- status = PAL_STATUS_EINVAL;
- goto fail_to_copy;
- }
- } else {
- status = PAL_STATUS_EINVAL;
- }
- break;
- case PAL_LOGICAL_TO_PHYSICAL:
- case PAL_GET_PSTATE:
- case PAL_CACHE_SHARED_INFO:
- /* Optional, no need to complain about being unimplemented */
- break;
- default:
- printk("%s: Unimplemented PAL Call %lu\n", __func__, index);
- break;
- }
- return ((struct ia64_pal_retval) {status, r9, r10, r11});
-
-fail_to_copy:
- gdprintk(XENLOG_WARNING,
- "PAL(%ld) fail to copy!!! args 0x%lx 0x%lx 0x%lx\n",
- index, in1, in2, in3);
- return ((struct ia64_pal_retval) {status, r9, r10, r11});
-}
-
-// Given a current-domain address (virtual or metaphysical), return the corresponding Xen virtual address.
-static unsigned long
-efi_translate_domain_addr(unsigned long domain_addr, IA64FAULT *fault,
- struct page_info** page)
-{
- struct vcpu *v = current;
- unsigned long mpaddr = domain_addr;
- unsigned long virt;
- *fault = IA64_NO_FAULT;
-
-again:
- if (v->domain->arch.sal_data->efi_virt_mode) {
- *fault = vcpu_tpa(v, domain_addr, &mpaddr);
- if (*fault != IA64_NO_FAULT) return 0;
- }
-
- virt = (unsigned long)domain_mpa_to_imva(v->domain, mpaddr);
- *page = virt_to_page(virt);
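- /* get_page() can fail transiently while the page is being
- * manipulated; retry unless it has genuinely changed owner. */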
- if (get_page(*page, current->domain) == 0) {
- if (page_get_owner(*page) != current->domain) {
- // which code is appropriate?
- *fault = IA64_FAULT;
- return 0;
- }
- goto again;
- }
-
- return virt;
-}
-
-static efi_status_t
-efi_emulate_get_time(
- unsigned long tv_addr, unsigned long tc_addr,
- IA64FAULT *fault)
-{
- unsigned long tv, tc = 0;
- struct page_info *tv_page = NULL;
- struct page_info *tc_page = NULL;
- efi_status_t status = 0;
- efi_time_t *tvp;
- struct tm timeptr;
- unsigned long xtimesec;
-
- tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- if (tc_addr) {
- tc = efi_translate_domain_addr(tc_addr, fault, &tc_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- }
-
- spin_lock(&efi_time_services_lock);
- status = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc);
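- /* Fold the per-domain wallclock offset into the result: convert
- * to seconds, add the offset, and convert back to an efi_time_t. */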
- tvp = (efi_time_t *)tv;
- xtimesec = mktime(tvp->year, tvp->month, tvp->day, tvp->hour,
- tvp->minute, tvp->second);
- xtimesec += current->domain->time_offset_seconds;
- timeptr = gmtime(xtimesec);
- tvp->second = timeptr.tm_sec;
- tvp->minute = timeptr.tm_min;
- tvp->hour = timeptr.tm_hour;
- tvp->day = timeptr.tm_mday;
- tvp->month = timeptr.tm_mon + 1;
- tvp->year = timeptr.tm_year + 1900;
- spin_unlock(&efi_time_services_lock);
-
-errout:
- if (tc_page != NULL)
- put_page(tc_page);
- if (tv_page != NULL)
- put_page(tv_page);
-
- return status;
-}
-
-void domain_set_time_offset(struct domain *d, int32_t time_offset_seconds)
-{
- d->time_offset_seconds = time_offset_seconds;
-}
-
-static efi_status_t
-efi_emulate_set_time(
- unsigned long tv_addr, IA64FAULT *fault)
-{
- unsigned long tv;
- struct page_info *tv_page = NULL;
- efi_status_t status = 0;
-
- if (current->domain != dom0)
- return EFI_UNSUPPORTED;
-
- tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
-
- spin_lock(&efi_time_services_lock);
- status = (*efi.set_time)((efi_time_t *)tv);
- spin_unlock(&efi_time_services_lock);
-
-errout:
- if (tv_page != NULL)
- put_page(tv_page);
-
- return status;
-}
-
-static efi_status_t
-efi_emulate_get_wakeup_time(
- unsigned long e_addr, unsigned long p_addr,
- unsigned long tv_addr, IA64FAULT *fault)
-{
- unsigned long enabled, pending, tv;
- struct page_info *e_page = NULL, *p_page = NULL,
- *tv_page = NULL;
- efi_status_t status = 0;
-
- if (current->domain != dom0)
- return EFI_UNSUPPORTED;
-
- if (!e_addr || !p_addr || !tv_addr)
- return EFI_INVALID_PARAMETER;
-
- enabled = efi_translate_domain_addr(e_addr, fault, &e_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- pending = efi_translate_domain_addr(p_addr, fault, &p_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
-
- spin_lock(&efi_time_services_lock);
- status = (*efi.get_wakeup_time)((efi_bool_t *)enabled,
- (efi_bool_t *)pending,
- (efi_time_t *)tv);
- spin_unlock(&efi_time_services_lock);
-
-errout:
- if (e_page != NULL)
- put_page(e_page);
- if (p_page != NULL)
- put_page(p_page);
- if (tv_page != NULL)
- put_page(tv_page);
-
- return status;
-}
-
-static efi_status_t
-efi_emulate_set_wakeup_time(
- unsigned long enabled, unsigned long tv_addr,
- IA64FAULT *fault)
-{
- unsigned long tv = 0;
- struct page_info *tv_page = NULL;
- efi_status_t status = 0;
-
- if (current->domain != dom0)
- return EFI_UNSUPPORTED;
-
- if (tv_addr) {
- tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- }
-
- spin_lock(&efi_time_services_lock);
- status = (*efi.set_wakeup_time)((efi_bool_t)enabled,
- (efi_time_t *)tv);
- spin_unlock(&efi_time_services_lock);
-
-errout:
- if (tv_page != NULL)
- put_page(tv_page);
-
- return status;
-}
-
-static efi_status_t
-efi_emulate_get_variable(
- unsigned long name_addr, unsigned long vendor_addr,
- unsigned long attr_addr, unsigned long data_size_addr,
- unsigned long data_addr, IA64FAULT *fault)
-{
- unsigned long name, vendor, attr = 0, data_size, data;
- struct page_info *name_page = NULL, *vendor_page = NULL,
- *attr_page = NULL, *data_size_page = NULL,
- *data_page = NULL;
- efi_status_t status = 0;
-
- if (current->domain != dom0)
- return EFI_UNSUPPORTED;
-
- name = efi_translate_domain_addr(name_addr, fault, &name_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- data_size = efi_translate_domain_addr(data_size_addr, fault,
- &data_size_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- data = efi_translate_domain_addr(data_addr, fault, &data_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- if (attr_addr) {
- attr = efi_translate_domain_addr(attr_addr, fault, &attr_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- }
-
- status = (*efi.get_variable)((efi_char16_t *)name,
- (efi_guid_t *)vendor,
- (u32 *)attr,
- (unsigned long *)data_size,
- (void *)data);
-
-errout:
- if (name_page != NULL)
- put_page(name_page);
- if (vendor_page != NULL)
- put_page(vendor_page);
- if (attr_page != NULL)
- put_page(attr_page);
- if (data_size_page != NULL)
- put_page(data_size_page);
- if (data_page != NULL)
- put_page(data_page);
-
- return status;
-}
-
-static efi_status_t
-efi_emulate_get_next_variable(
- unsigned long name_size_addr, unsigned long name_addr,
- unsigned long vendor_addr, IA64FAULT *fault)
-{
- unsigned long name_size, name, vendor;
- struct page_info *name_size_page = NULL, *name_page = NULL,
- *vendor_page = NULL;
- efi_status_t status = 0;
-
- if (current->domain != dom0)
- return EFI_UNSUPPORTED;
-
- name_size = efi_translate_domain_addr(name_size_addr, fault,
- &name_size_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- name = efi_translate_domain_addr(name_addr, fault, &name_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
-
- status = (*efi.get_next_variable)((unsigned long *)name_size,
- (efi_char16_t *)name,
- (efi_guid_t *)vendor);
-
-errout:
- if (name_size_page != NULL)
- put_page(name_size_page);
- if (name_page != NULL)
- put_page(name_page);
- if (vendor_page != NULL)
- put_page(vendor_page);
-
- return status;
-}
-
-static efi_status_t
-efi_emulate_set_variable(
- unsigned long name_addr, unsigned long vendor_addr,
- unsigned long attr, unsigned long data_size,
- unsigned long data_addr, IA64FAULT *fault)
-{
- unsigned long name, vendor, data;
- struct page_info *name_page = NULL, *vendor_page = NULL,
- *data_page = NULL;
- efi_status_t status = 0;
-
- if (current->domain != dom0)
- return EFI_UNSUPPORTED;
-
- name = efi_translate_domain_addr(name_addr, fault, &name_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
- data = efi_translate_domain_addr(data_addr, fault, &data_page);
- if (*fault != IA64_NO_FAULT)
- goto errout;
-
- status = (*efi.set_variable)((efi_char16_t *)name,
- (efi_guid_t *)vendor,
- attr,
- data_size,
- (void *)data);
-
-errout:
- if (name_page != NULL)
- put_page(name_page);
- if (vendor_page != NULL)
- put_page(vendor_page);
- if (data_page != NULL)
- put_page(data_page);
-
- return status;
-}
-
-static efi_status_t
-efi_emulate_set_virtual_address_map(
- unsigned long memory_map_size, unsigned long descriptor_size,
- u32 descriptor_version, efi_memory_desc_t *virtual_map)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t entry, *md = &entry;
- u64 efi_desc_size;
-
- unsigned long *vfn;
- struct domain *d = current->domain;
- efi_runtime_services_t *efi_runtime = d->arch.efi_runtime;
- fpswa_interface_t *fpswa_inf = d->arch.fpswa_inf;
-
- if (descriptor_version != EFI_MEMDESC_VERSION) {
- printk ("efi_emulate_set_virtual_address_map: memory "
- "descriptor version unmatched (%d vs %d)\n",
- (int)descriptor_version, EFI_MEMDESC_VERSION);
- return EFI_INVALID_PARAMETER;
- }
-
- if (descriptor_size != sizeof(efi_memory_desc_t)) {
- printk ("efi_emulate_set_virtual_address_map: memory descriptor size unmatched\n");
- return EFI_INVALID_PARAMETER;
- }
-
- if (d->arch.sal_data->efi_virt_mode)
- return EFI_UNSUPPORTED;
-
- efi_map_start = virtual_map;
- efi_map_end = efi_map_start + memory_map_size;
- efi_desc_size = sizeof(efi_memory_desc_t);
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- struct page_info *efi_runtime_page = NULL;
- struct page_info *fpswa_inf_page = NULL;
- struct page_info *fw_table_page = NULL;
-
- if (copy_from_user(&entry, p, sizeof(efi_memory_desc_t))) {
- printk ("efi_emulate_set_virtual_address_map: copy_from_user() fault. addr=0x%p\n", p);
- return EFI_UNSUPPORTED;
- }
-
- /* skip over non-PAL_CODE memory descriptors; EFI_RUNTIME is included in PAL_CODE. */
- if (md->type != EFI_PAL_CODE)
- continue;
-
- /* Get pages to prevent them from being freed
- * while we touch them.
- * These entries are in [FW_TABLES_BASE_PADDR, ...];
- * see dom_fw.h for the layout.
- */
- efi_runtime_page = virt_to_page(efi_runtime);
- fpswa_inf_page = virt_to_page(fpswa_inf);
- fw_table_page = virt_to_page(
- domain_mpa_to_imva(d, FW_TABLES_BASE_PADDR));
- if (get_page(efi_runtime_page, d) == 0)
- return EFI_INVALID_PARAMETER;
- if (get_page(fpswa_inf_page, d) == 0) {
- put_page(efi_runtime_page);
- return EFI_INVALID_PARAMETER;
- }
- if (get_page(fw_table_page, d) == 0) {
- put_page(fpswa_inf_page);
- put_page(efi_runtime_page);
- return EFI_INVALID_PARAMETER;
- }
-
-#define EFI_HYPERCALL_PATCH_TO_VIRT(tgt,call) \
- do { \
- vfn = (unsigned long *) domain_mpa_to_imva(d, tgt); \
- *vfn++ = FW_HYPERCALL_##call##_INDEX * 16UL + md->virt_addr; \
- *vfn++ = 0; \
- } while (0)
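-
-/*
- * Each patched entry is an ia64 function descriptor: the first word
- * is the entry point and the second the gp. Point the entry at the
- * per-service hypercall stub inside the remapped PAL_CODE region and
- * clear the gp, which the stubs do not use.
- */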
-
- EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_time,EFI_GET_TIME);
- EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_time,EFI_SET_TIME);
- EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
- EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
- EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
- EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_variable,EFI_GET_VARIABLE);
- EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
- EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_variable,EFI_SET_VARIABLE);
- EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
- EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->reset_system,EFI_RESET_SYSTEM);
-
- vfn = (unsigned long *) domain_mpa_to_imva(d, (unsigned long) fpswa_inf->fpswa);
- *vfn++ = FW_HYPERCALL_FPSWA_PATCH_INDEX * 16UL + md->virt_addr;
- *vfn = 0;
- fpswa_inf->fpswa = (void *) (FW_HYPERCALL_FPSWA_ENTRY_INDEX * 16UL + md->virt_addr);
-
- put_page(fw_table_page);
- put_page(fpswa_inf_page);
- put_page(efi_runtime_page);
- break;
- }
-
- /* The virtual address map has been applied. */
- d->arch.sal_data->efi_virt_mode = 1;
-
- return EFI_SUCCESS;
-}
-
-efi_status_t
-efi_emulator (struct pt_regs *regs, IA64FAULT *fault)
-{
- struct vcpu *v = current;
- efi_status_t status;
-
- debugger_event(XEN_IA64_DEBUG_ON_EFI);
-
- *fault = IA64_NO_FAULT;
-
- switch (regs->r2) {
- case FW_HYPERCALL_EFI_RESET_SYSTEM:
- {
- u8 reason;
- unsigned long val = vcpu_get_gr(v,32);
- switch (val)
- {
- case EFI_RESET_SHUTDOWN:
- reason = SHUTDOWN_poweroff;
- break;
- case EFI_RESET_COLD:
- case EFI_RESET_WARM:
- default:
- reason = SHUTDOWN_reboot;
- break;
- }
- domain_shutdown (current->domain, reason);
- }
- status = EFI_UNSUPPORTED;
- break;
- case FW_HYPERCALL_EFI_GET_TIME:
- status = efi_emulate_get_time (
- vcpu_get_gr(v,32),
- vcpu_get_gr(v,33),
- fault);
- break;
- case FW_HYPERCALL_EFI_SET_TIME:
- status = efi_emulate_set_time (
- vcpu_get_gr(v,32),
- fault);
- break;
- case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
- status = efi_emulate_get_wakeup_time (
- vcpu_get_gr(v,32),
- vcpu_get_gr(v,33),
- vcpu_get_gr(v,34),
- fault);
- break;
- case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
- status = efi_emulate_set_wakeup_time (
- vcpu_get_gr(v,32),
- vcpu_get_gr(v,33),
- fault);
- break;
- case FW_HYPERCALL_EFI_GET_VARIABLE:
- status = efi_emulate_get_variable (
- vcpu_get_gr(v,32),
- vcpu_get_gr(v,33),
- vcpu_get_gr(v,34),
- vcpu_get_gr(v,35),
- vcpu_get_gr(v,36),
- fault);
- break;
- case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
- status = efi_emulate_get_next_variable (
- vcpu_get_gr(v,32),
- vcpu_get_gr(v,33),
- vcpu_get_gr(v,34),
- fault);
- break;
- case FW_HYPERCALL_EFI_SET_VARIABLE:
- status = efi_emulate_set_variable (
- vcpu_get_gr(v,32),
- vcpu_get_gr(v,33),
- vcpu_get_gr(v,34),
- vcpu_get_gr(v,35),
- vcpu_get_gr(v,36),
- fault);
- break;
- case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
- status = efi_emulate_set_virtual_address_map (
- vcpu_get_gr(v,32),
- vcpu_get_gr(v,33),
- (u32) vcpu_get_gr(v,34),
- (efi_memory_desc_t *) vcpu_get_gr(v,35));
- break;
- case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
- // FIXME: need fixes in efi.h from 2.6.9
- status = EFI_UNSUPPORTED;
- break;
- default:
- printk("unknown ia64 fw hypercall %lx\n", regs->r2);
- status = EFI_UNSUPPORTED;
- }
-
- return status;
-}
-
-void
-do_ssc(unsigned long ssc, struct pt_regs *regs)
-{
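- /* SSC = Simulator System Call: console and disk services provided
- * by the HP Ski simulator; only meaningful when running_on_sim. */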
- unsigned long arg0, arg1, arg2, arg3, retval;
- char buf[2];
-/**/ static int last_fd, last_count; // FIXME FIXME FIXME
-/**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
-/**/ struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
-
- arg0 = vcpu_get_gr(current,32);
- switch(ssc) {
- case SSC_PUTCHAR:
- buf[0] = arg0;
- buf[1] = '\0';
- printk("%s", buf);
- break;
- case SSC_GETCHAR:
- retval = ia64_ssc(0,0,0,0,ssc);
- vcpu_set_gr(current,8,retval,0);
- break;
- case SSC_WAIT_COMPLETION:
- if (arg0) { // metaphysical address
-
- arg0 = translate_domain_mpaddr(arg0, NULL);
-/**/ stat = (struct ssc_disk_stat *)__va(arg0);
-///**/ if (stat->fd == last_fd) stat->count = last_count;
-/**/ stat->count = last_count;
-//if (last_count >= PAGE_SIZE) printk("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
-///**/ retval = ia64_ssc(arg0,0,0,0,ssc);
-/**/ retval = 0;
- }
- else retval = -1L;
- vcpu_set_gr(current,8,retval,0);
- break;
- case SSC_OPEN:
- arg1 = vcpu_get_gr(current,33); // access rights
- if (!running_on_sim) {
- printk("SSC_OPEN, not implemented on hardware. (ignoring...)\n");
- arg0 = 0;
- }
- if (arg0) { // metaphysical address
- arg0 = translate_domain_mpaddr(arg0, NULL);
- retval = ia64_ssc(arg0,arg1,0,0,ssc);
- }
- else retval = -1L;
- vcpu_set_gr(current,8,retval,0);
- break;
- case SSC_WRITE:
- case SSC_READ:
-//if (ssc == SSC_WRITE) printk("DOING AN SSC_WRITE\n");
- arg1 = vcpu_get_gr(current,33);
- arg2 = vcpu_get_gr(current,34);
- arg3 = vcpu_get_gr(current,35);
- if (arg2) { // metaphysical address of descriptor
- struct ssc_disk_req *req;
- unsigned long mpaddr;
- long len;
-
- arg2 = translate_domain_mpaddr(arg2, NULL);
- req = (struct ssc_disk_req *) __va(arg2);
- req->len &= 0xffffffffL; // avoid strange bug
- len = req->len;
-/**/ last_fd = arg1;
-/**/ last_count = len;
- mpaddr = req->addr;
-//if (last_count >= PAGE_SIZE) printk("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
- retval = 0;
- if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
- // do partial page first
- req->addr = translate_domain_mpaddr(mpaddr, NULL);
- req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
- len -= req->len; mpaddr += req->len;
- retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
- arg3 += req->len; // file offset
-/**/ last_stat.fd = last_fd;
-/**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
-//if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
- }
- if (retval >= 0) while (len > 0) {
- req->addr = translate_domain_mpaddr(mpaddr, NULL);
- req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
- len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
- retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
- arg3 += req->len; // file offset
-// TEMP REMOVED AGAIN arg3 += req->len; // file offset
-/**/ last_stat.fd = last_fd;
-/**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
-//if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
- }
- // set it back to the original value
- req->len = last_count;
- }
- else retval = -1L;
- vcpu_set_gr(current,8,retval,0);
-//if (last_count >= PAGE_SIZE) printk("retval=%x\n",retval);
- break;
- case SSC_CONNECT_INTERRUPT:
- arg1 = vcpu_get_gr(current,33);
- arg2 = vcpu_get_gr(current,34);
- arg3 = vcpu_get_gr(current,35);
- if (!running_on_sim) {
- printk("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n");
- break;
- }
- (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
- break;
- case SSC_NETDEV_PROBE:
- vcpu_set_gr(current,8,-1L,0);
- break;
- default:
- panic_domain(regs,
- "%s: bad ssc code %lx, iip=0x%lx, b0=0x%lx\n",
- __func__, ssc, regs->cr_iip, regs->b0);
- break;
- }
- vcpu_increment_iip(current);
-}
diff --git a/xen/arch/ia64/xen/gdbstub.c b/xen/arch/ia64/xen/gdbstub.c
deleted file mode 100644
index 925d8ab508..0000000000
--- a/xen/arch/ia64/xen/gdbstub.c
+++ /dev/null
@@ -1,819 +0,0 @@
-/*
- * ia64-specific cdb routines
- * cdb xen/ia64 by Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * some routines are stolen from kgdb/ia64.
- */
-/*
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-
-/*
- * Copyright (C) 2000-2001 VERITAS Software Corporation.
- */
-/*
- * Contributor: Lake Stevens Instrument Division$
- * Written by: Glenn Engel $
- * Updated by: Amit Kale<akale@veritas.com>
- * Modified for 386 by Jim Kingdon, Cygnus Support.
- * Original kgdb, compatibility with 2.1.xx kernel by David Grothe <dave@gcom.com>
- *
- */
-
-
-#include <xen/lib.h>
-#include <xen/mm.h>
-#include <asm/byteorder.h>
-#include <asm/debugger.h>
-#include <asm/uaccess.h>
-
-#define USE_UNWIND
-
-#ifdef USE_UNWIND
-#include <asm/unwind.h>
-#endif
-
-/* Printk isn't particularly safe just after we've trapped to the
- debugger, so avoid it. */
-#define dbg_printk(...)
-//#define dbg_printk(...) printk(__VA_ARGS__)
-
-u16
-gdb_arch_signal_num(struct cpu_user_regs *regs, unsigned long cookie)
-{
- /* XXX */
- return 1;
-}
-
-void
-gdb_arch_read_reg_array(struct cpu_user_regs *regs, struct gdb_context *ctx)
-{
- gdb_send_reply("", ctx);
-}
-
-void
-gdb_arch_write_reg(unsigned long regnum, unsigned long val,
- struct cpu_user_regs *regs, struct gdb_context *ctx)
-{
- gdb_send_reply("", ctx);
-}
-
-void
-gdb_arch_write_reg_array(struct cpu_user_regs *regs, const char* buf,
- struct gdb_context *ctx)
-{
- /* XXX TODO */
- gdb_send_reply("E02", ctx);
-}
-
-/* Like copy_from_user, but safe to call with interrupts disabled.
- Trust me, and don't look behind the curtain. */
-unsigned
-gdb_arch_copy_from_user(void *dest, const void *src, unsigned len)
-{
- int val;
- __asm__ __volatile__(
- "cmp4.eq p6, p0 = r0, %1\n"
- "(p6) br.cond.dptk 2f\n"
- "[1:]\n"
- ".xdata4 \"__ex_table\", 99f-., 2f-.;\n"
- "[99:] ld1 %0 = [%3], 1\n"
- ";;\n"
- ".xdata4 \"__ex_table\", 99f-., 2f-.;\n"
- "[99:] st1 [%2] = %0, 1\n"
- "adds %1 = -1, %1\n"
- ";;\n"
- "cmp4.eq p0, p6 = r0, %1\n"
- "(p6) br.cond.dptk 1b\n"
- "[2:]\n"
- : "=r"(val), "=r"(len), "=r"(dest), "=r"(src)
- : "1"(len), "2"(dest), "3"(src)
- : "memory", "p6");
- return len;
-}
-
-unsigned int
-gdb_arch_copy_to_user(void *dest, const void *src, unsigned len)
-{
- /* XXX */
- return len;
-}
-
-#define NUM_REGS 590
-#define REGISTER_BYTES (NUM_REGS*8+128*8)
-#define REGISTER_BYTE(N) (((N) * 8) \
- + ((N) <= IA64_FR0_REGNUM ? \
- 0 : 8 * (((N) > IA64_FR127_REGNUM) ? 128 : (N) - IA64_FR0_REGNUM)))
-#define REGISTER_SIZE(N) \
- (((N) >= IA64_FR0_REGNUM && (N) <= IA64_FR127_REGNUM) ? 16 : 8)
-#define IA64_GR0_REGNUM 0
-#define IA64_FR0_REGNUM 128
-#define IA64_FR127_REGNUM (IA64_FR0_REGNUM+127)
-#define IA64_PR0_REGNUM 256
-#define IA64_BR0_REGNUM 320
-#define IA64_VFP_REGNUM 328
-#define IA64_PR_REGNUM 330
-#define IA64_IP_REGNUM 331
-#define IA64_PSR_REGNUM 332
-#define IA64_CFM_REGNUM 333
-#define IA64_AR0_REGNUM 334
-#define IA64_NAT0_REGNUM 462
-#define IA64_NAT31_REGNUM (IA64_NAT0_REGNUM+31)
-#define IA64_NAT32_REGNUM (IA64_NAT0_REGNUM+32)
-#define IA64_RSC_REGNUM (IA64_AR0_REGNUM+16)
-#define IA64_BSP_REGNUM (IA64_AR0_REGNUM+17)
-#define IA64_BSPSTORE_REGNUM (IA64_AR0_REGNUM+18)
-#define IA64_RNAT_REGNUM (IA64_AR0_REGNUM+19)
-#define IA64_FCR_REGNUM (IA64_AR0_REGNUM+21)
-#define IA64_EFLAG_REGNUM (IA64_AR0_REGNUM+24)
-#define IA64_CSD_REGNUM (IA64_AR0_REGNUM+25)
-#define IA64_SSD_REGNUM (IA64_AR0_REGNUM+26)
-#define IA64_CFLG_REGNUM (IA64_AR0_REGNUM+27)
-#define IA64_FSR_REGNUM (IA64_AR0_REGNUM+28)
-#define IA64_FIR_REGNUM (IA64_AR0_REGNUM+29)
-#define IA64_FDR_REGNUM (IA64_AR0_REGNUM+30)
-#define IA64_CCV_REGNUM (IA64_AR0_REGNUM+32)
-#define IA64_UNAT_REGNUM (IA64_AR0_REGNUM+36)
-#define IA64_FPSR_REGNUM (IA64_AR0_REGNUM+40)
-#define IA64_ITC_REGNUM (IA64_AR0_REGNUM+44)
-#define IA64_PFS_REGNUM (IA64_AR0_REGNUM+64)
-#define IA64_LC_REGNUM (IA64_AR0_REGNUM+65)
-#define IA64_EC_REGNUM (IA64_AR0_REGNUM+66)
-
-#ifndef USE_UNWIND
-struct regs_to_cpu_user_resgs_index {
- unsigned int reg;
- unsigned int ptregoff;
-};
-
-#define ptoff(V) ((unsigned int)&((struct cpu_user_regs*)0x0)->V)
-
-// gr
-static const struct regs_to_cpu_user_resgs_index
-gr_reg_to_cpu_user_regs_index[] = {
- {IA64_GR0_REGNUM + 8, ptoff(r8)},
- {IA64_GR0_REGNUM + 9, ptoff(r9)},
- {IA64_GR0_REGNUM + 10, ptoff(r10)},
- {IA64_GR0_REGNUM + 11, ptoff(r11)},
- {IA64_GR0_REGNUM + 1, ptoff(r1)},
- {IA64_GR0_REGNUM + 12, ptoff(r12)},
- {IA64_GR0_REGNUM + 13, ptoff(r13)},
- {IA64_GR0_REGNUM + 15, ptoff(r15)},
-
- {IA64_GR0_REGNUM + 14, ptoff(r14)},
- {IA64_GR0_REGNUM + 2, ptoff(r2)},
- {IA64_GR0_REGNUM + 3, ptoff(r3)},
- {IA64_GR0_REGNUM + 16, ptoff(r16)},
- {IA64_GR0_REGNUM + 17, ptoff(r17)},
- {IA64_GR0_REGNUM + 18, ptoff(r18)},
- {IA64_GR0_REGNUM + 19, ptoff(r19)},
- {IA64_GR0_REGNUM + 20, ptoff(r20)},
- {IA64_GR0_REGNUM + 21, ptoff(r21)},
- {IA64_GR0_REGNUM + 22, ptoff(r22)},
- {IA64_GR0_REGNUM + 23, ptoff(r23)},
- {IA64_GR0_REGNUM + 24, ptoff(r24)},
- {IA64_GR0_REGNUM + 25, ptoff(r25)},
- {IA64_GR0_REGNUM + 26, ptoff(r26)},
- {IA64_GR0_REGNUM + 27, ptoff(r27)},
- {IA64_GR0_REGNUM + 28, ptoff(r28)},
- {IA64_GR0_REGNUM + 29, ptoff(r29)},
- {IA64_GR0_REGNUM + 30, ptoff(r30)},
- {IA64_GR0_REGNUM + 31, ptoff(r31)},
-
- {IA64_GR0_REGNUM + 4, ptoff(r4)},
- {IA64_GR0_REGNUM + 5, ptoff(r5)},
- {IA64_GR0_REGNUM + 6, ptoff(r6)},
- {IA64_GR0_REGNUM + 7, ptoff(r7)},
-};
-static const int gr_reg_to_cpu_user_regs_index_max =
- sizeof(gr_reg_to_cpu_user_regs_index) /
- sizeof(gr_reg_to_cpu_user_regs_index[0]);
-
-// br
-static const struct regs_to_cpu_user_resgs_index
-br_reg_to_cpu_user_regs_index[] = {
- {IA64_BR0_REGNUM + 0, ptoff(b0)},
- {IA64_BR0_REGNUM + 6, ptoff(b6)},
- {IA64_BR0_REGNUM + 7, ptoff(b7)},
-};
-static const int br_reg_to_cpu_user_regs_index_max =
- sizeof(br_reg_to_cpu_user_regs_index) /
- sizeof(br_reg_to_cpu_user_regs_index[0]);
-
-// f
-static const struct regs_to_cpu_user_resgs_index
-fr_reg_to_cpu_user_regs_index[] = {
- {IA64_FR0_REGNUM + 6, ptoff(f6)},
- {IA64_FR0_REGNUM + 7, ptoff(f7)},
- {IA64_FR0_REGNUM + 8, ptoff(f8)},
- {IA64_FR0_REGNUM + 9, ptoff(f9)},
- {IA64_FR0_REGNUM + 10, ptoff(f10)},
- {IA64_FR0_REGNUM + 11, ptoff(f11)},
-};
-static const int fr_reg_to_cpu_user_regs_index_max =
- sizeof(fr_reg_to_cpu_user_regs_index) /
- sizeof(fr_reg_to_cpu_user_regs_index[0]);
-
-
-void
-gdb_arch_read_reg(unsigned long regnum, struct cpu_user_regs *regs,
- struct gdb_context *ctx)
-{
- unsigned long reg = IA64_IP_REGNUM;
- char buf[17];
- int i;
-
- dbg_printk("Register read regnum = 0x%lx\n", regnum);
- if (IA64_GR0_REGNUM <= regnum && regnum <= IA64_GR0_REGNUM + 31) {
- for (i = 0; i < gr_reg_to_cpu_user_regs_index_max; i++) {
- if (gr_reg_to_cpu_user_regs_index[i].reg == regnum) {
- reg = *(unsigned long*)(((char*)regs) + gr_reg_to_cpu_user_regs_index[i].ptregoff);
- break;
- }
- }
- if (i == gr_reg_to_cpu_user_regs_index_max) {
- goto out_err;
- }
- } else if (IA64_BR0_REGNUM <= regnum && regnum <= IA64_BR0_REGNUM + 7) {
- for (i = 0; i < br_reg_to_cpu_user_regs_index_max; i++) {
- if (br_reg_to_cpu_user_regs_index[i].reg == regnum) {
- reg = *(unsigned long*)(((char*)regs) + br_reg_to_cpu_user_regs_index[i].ptregoff);
- break;
- }
- }
- if (i == br_reg_to_cpu_user_regs_index_max) {
- goto out_err;
- }
- } else if (IA64_FR0_REGNUM + 6 <= regnum && regnum <= IA64_FR0_REGNUM + 11) {
- for (i = 0; i < fr_reg_to_cpu_user_regs_index_max; i++) {
- if (fr_reg_to_cpu_user_regs_index[i].reg == regnum) {
- reg = *(unsigned long*)(((char*)regs) + fr_reg_to_cpu_user_regs_index[i].ptregoff);
- break;
- }
- }
- if (i == fr_reg_to_cpu_user_regs_index_max) {
- goto out_err;
- }
- } else if (regnum == IA64_CSD_REGNUM) {
- reg = regs->ar_csd;
- } else if (regnum == IA64_SSD_REGNUM) {
- reg = regs->ar_ssd;
- } else if (regnum == IA64_PSR_REGNUM) {
- reg = regs->cr_ipsr;
- } else if (regnum == IA64_IP_REGNUM) {
- reg = regs->cr_iip;
- } else if (regnum == IA64_CFM_REGNUM) {
- reg = regs->cr_ifs;
- } else if (regnum == IA64_UNAT_REGNUM) {
- reg = regs->ar_unat;
- } else if (regnum == IA64_PFS_REGNUM) {
- reg = regs->ar_pfs;
- } else if (regnum == IA64_RSC_REGNUM) {
- reg = regs->ar_rsc;
- } else if (regnum == IA64_RNAT_REGNUM) {
- reg = regs->ar_rnat;
- } else if (regnum == IA64_BSPSTORE_REGNUM) {
- reg = regs->ar_bspstore;
- } else if (regnum == IA64_PR_REGNUM) {
- reg = regs->pr;
- } else if (regnum == IA64_FPSR_REGNUM) {
- reg = regs->ar_fpsr;
- } else if (regnum == IA64_CCV_REGNUM) {
- reg = regs->ar_ccv;
- } else {
- // emul_unat, rfi_pfs
- goto out_err;
- }
-
- dbg_printk("Register read regnum = 0x%lx, val = 0x%lx\n", regnum, reg);
- snprintf(buf, sizeof(buf), "%.08lx", swab64(reg));
-out:
- return gdb_send_reply(buf, ctx);
-
-out_err:
- dbg_printk("Register read unsupported regnum = 0x%lx\n", regnum);
- safe_strcpy(buf, "x");
- goto out;
-}
-#else
-
-#define ptoff(V) ((unsigned int) &((struct pt_regs *)0x0)->V)
-struct reg_to_ptreg_index {
- unsigned int reg;
- unsigned int ptregoff;
-};
-
-static struct reg_to_ptreg_index gr_reg_to_ptreg_index[] = {
- {IA64_GR0_REGNUM + 1, ptoff(r1)},
- {IA64_GR0_REGNUM + 2, ptoff(r2)},
- {IA64_GR0_REGNUM + 3, ptoff(r3)},
- {IA64_GR0_REGNUM + 8, ptoff(r8)},
- {IA64_GR0_REGNUM + 9, ptoff(r9)},
- {IA64_GR0_REGNUM + 10, ptoff(r10)},
- {IA64_GR0_REGNUM + 11, ptoff(r11)},
- {IA64_GR0_REGNUM + 12, ptoff(r12)},
- {IA64_GR0_REGNUM + 13, ptoff(r13)},
- {IA64_GR0_REGNUM + 14, ptoff(r14)},
- {IA64_GR0_REGNUM + 15, ptoff(r15)},
- {IA64_GR0_REGNUM + 16, ptoff(r16)},
- {IA64_GR0_REGNUM + 17, ptoff(r17)},
- {IA64_GR0_REGNUM + 18, ptoff(r18)},
- {IA64_GR0_REGNUM + 19, ptoff(r19)},
- {IA64_GR0_REGNUM + 20, ptoff(r20)},
- {IA64_GR0_REGNUM + 21, ptoff(r21)},
- {IA64_GR0_REGNUM + 22, ptoff(r22)},
- {IA64_GR0_REGNUM + 23, ptoff(r23)},
- {IA64_GR0_REGNUM + 24, ptoff(r24)},
- {IA64_GR0_REGNUM + 25, ptoff(r25)},
- {IA64_GR0_REGNUM + 26, ptoff(r26)},
- {IA64_GR0_REGNUM + 27, ptoff(r27)},
- {IA64_GR0_REGNUM + 28, ptoff(r28)},
- {IA64_GR0_REGNUM + 29, ptoff(r29)},
- {IA64_GR0_REGNUM + 30, ptoff(r30)},
- {IA64_GR0_REGNUM + 31, ptoff(r31)},
-};
-
-static struct reg_to_ptreg_index br_reg_to_ptreg_index[] = {
- {IA64_BR0_REGNUM, ptoff(b0)},
- {IA64_BR0_REGNUM + 6, ptoff(b6)},
- {IA64_BR0_REGNUM + 7, ptoff(b7)},
-};
-
-static struct reg_to_ptreg_index ar_reg_to_ptreg_index[] = {
- {IA64_PFS_REGNUM, ptoff(ar_pfs)},
- {IA64_UNAT_REGNUM, ptoff(ar_unat)},
- {IA64_RNAT_REGNUM, ptoff(ar_rnat)},
- {IA64_BSPSTORE_REGNUM, ptoff(ar_bspstore)},
- {IA64_RSC_REGNUM, ptoff(ar_rsc)},
- {IA64_CSD_REGNUM, ptoff(ar_csd)},
- {IA64_SSD_REGNUM, ptoff(ar_ssd)},
- {IA64_FPSR_REGNUM, ptoff(ar_fpsr)},
- {IA64_CCV_REGNUM, ptoff(ar_ccv)},
-};
-
-#ifndef XEN
-extern atomic_t cpu_doing_single_step;
-#endif
-
-static int kgdb_gr_reg(int regnum, struct unw_frame_info *info,
- unsigned long *reg, int rw)
-{
- char nat;
-
- if ((regnum >= IA64_GR0_REGNUM && regnum <= (IA64_GR0_REGNUM + 1)) ||
- (regnum >= (IA64_GR0_REGNUM + 4) &&
- regnum <= (IA64_GR0_REGNUM + 7)))
- return !unw_access_gr(info, regnum - IA64_GR0_REGNUM,
- reg, &nat, rw);
- else
- return 0;
-}
-static int kgdb_gr_ptreg(int regnum, struct pt_regs * ptregs,
- struct unw_frame_info *info, unsigned long *reg, int rw)
-{
- int i, result = 1;
- char nat;
-
- if (!((regnum >= (IA64_GR0_REGNUM + 2) &&
- regnum <= (IA64_GR0_REGNUM + 3)) ||
- (regnum >= (IA64_GR0_REGNUM + 8) &&
- regnum <= (IA64_GR0_REGNUM + 15)) ||
- (regnum >= (IA64_GR0_REGNUM + 16) &&
- regnum <= (IA64_GR0_REGNUM + 31))))
- return 0;
- else if (rw && ptregs) {
- for (i = 0; i < ARRAY_SIZE(gr_reg_to_ptreg_index); i++)
- if (gr_reg_to_ptreg_index[i].reg == regnum) {
- *((unsigned long *)(((void *)ptregs) +
- gr_reg_to_ptreg_index[i].ptregoff)) = *reg;
- break;
- }
- } else if (!rw && ptregs) {
- for (i = 0; i < ARRAY_SIZE(gr_reg_to_ptreg_index); i++)
- if (gr_reg_to_ptreg_index[i].reg == regnum) {
- *reg = *((unsigned long *)
- (((void *)ptregs) +
- gr_reg_to_ptreg_index[i].ptregoff));
- break;
- }
- } else
- result = !unw_access_gr(info, regnum - IA64_GR0_REGNUM,
- reg, &nat, rw);
- return result;
-}
-
-static int kgdb_br_reg(int regnum, struct pt_regs * ptregs,
- struct unw_frame_info *info, unsigned long *reg, int rw)
-{
- int i, result = 1;
-
- if (!(regnum >= IA64_BR0_REGNUM && regnum <= (IA64_BR0_REGNUM + 7)))
- return 0;
-
- switch (regnum) {
- case IA64_BR0_REGNUM:
- case IA64_BR0_REGNUM + 6:
- case IA64_BR0_REGNUM + 7:
- if (rw) {
- for (i = 0; i < ARRAY_SIZE(br_reg_to_ptreg_index); i++)
- if (br_reg_to_ptreg_index[i].reg == regnum) {
- *((unsigned long *)
- (((void *)ptregs) +
- br_reg_to_ptreg_index[i].ptregoff)) =
- *reg;
- break;
- }
- } else
- for (i = 0; i < ARRAY_SIZE(br_reg_to_ptreg_index); i++)
- if (br_reg_to_ptreg_index[i].reg == regnum) {
- *reg = *((unsigned long *)
- (((void *)ptregs) +
- br_reg_to_ptreg_index[i].
- ptregoff));
- break;
- }
- break;
- case IA64_BR0_REGNUM + 1:
- case IA64_BR0_REGNUM + 2:
- case IA64_BR0_REGNUM + 3:
- case IA64_BR0_REGNUM + 4:
- case IA64_BR0_REGNUM + 5:
- result = !unw_access_br(info, regnum - IA64_BR0_REGNUM,
- reg, rw);
- break;
- }
-
- return result;
-}
-
-static int kgdb_fr_reg(int regnum, char *inbuffer, struct pt_regs * ptregs,
- struct unw_frame_info *info, unsigned long *reg,
- struct ia64_fpreg *freg, int rw)
-{
- int result = 1;
-
- if (!(regnum >= IA64_FR0_REGNUM && regnum <= (IA64_FR0_REGNUM + 127)))
- return 0;
-
- switch (regnum) {
- case IA64_FR0_REGNUM + 6:
- case IA64_FR0_REGNUM + 7:
- case IA64_FR0_REGNUM + 8:
- case IA64_FR0_REGNUM + 9:
- case IA64_FR0_REGNUM + 10:
- case IA64_FR0_REGNUM + 11:
- case IA64_FR0_REGNUM + 12:
- if (rw) {
-#ifndef XEN
- char *ptr = inbuffer;
-
- freg->u.bits[0] = *reg;
- kgdb_hex2long(&ptr, &freg->u.bits[1]);
- *(&ptregs->f6 + (regnum - (IA64_FR0_REGNUM + 6))) =
- *freg;
-#else
- printk("%s: %d: writing to fpreg is not supported.\n",
- __func__, __LINE__);
-#endif
- break;
- } else if (!ptregs)
- result = !unw_access_fr(info, regnum - IA64_FR0_REGNUM,
- freg, rw);
- else
-#ifndef XEN
- *freg =
- *(&ptregs->f6 + (regnum - (IA64_FR0_REGNUM + 6)));
-#else
- //XXX struct ia64_fpreg and struct pt_fpreg are the same.
- *freg = *((struct ia64_fpreg*)(&ptregs->f6 +
- (regnum - (IA64_FR0_REGNUM + 6))));
-#endif
- break;
- default:
- if (!rw)
- result = !unw_access_fr(info, regnum - IA64_FR0_REGNUM,
- freg, rw);
- else
- result = 0;
- break;
- }
-
- return result;
-}
-
-static int kgdb_ar_reg(int regnum, struct pt_regs * ptregs,
- struct unw_frame_info *info, unsigned long *reg, int rw)
-{
- int result = 0, i;
-
- if (!(regnum >= IA64_AR0_REGNUM && regnum <= IA64_EC_REGNUM))
- return 0;
-
- if (rw && ptregs) {
- for (i = 0; i < ARRAY_SIZE(ar_reg_to_ptreg_index); i++)
- if (ar_reg_to_ptreg_index[i].reg == regnum) {
- *((unsigned long *) (((void *)ptregs) +
- ar_reg_to_ptreg_index[i].ptregoff)) =
- *reg;
- result = 1;
- break;
- }
- } else if (ptregs) {
- for (i = 0; i < ARRAY_SIZE(ar_reg_to_ptreg_index); i++)
- if (ar_reg_to_ptreg_index[i].reg == regnum) {
- *reg = *((unsigned long *) (((void *)ptregs) +
- ar_reg_to_ptreg_index[i].ptregoff));
- result = 1;
- break;
- }
- }
-
- if (result)
- return result;
-
- result = 1;
-
- switch (regnum) {
- case IA64_CSD_REGNUM:
- result = !unw_access_ar(info, UNW_AR_CSD, reg, rw);
- break;
- case IA64_SSD_REGNUM:
- result = !unw_access_ar(info, UNW_AR_SSD, reg, rw);
- break;
- case IA64_UNAT_REGNUM:
- result = !unw_access_ar(info, UNW_AR_UNAT, reg, rw);
- break;
- case IA64_RNAT_REGNUM:
- result = !unw_access_ar(info, UNW_AR_RNAT, reg, rw);
- break;
- case IA64_BSPSTORE_REGNUM:
- result = !unw_access_ar(info, UNW_AR_BSPSTORE, reg, rw);
- break;
- case IA64_PFS_REGNUM:
- result = !unw_access_ar(info, UNW_AR_PFS, reg, rw);
- break;
- case IA64_LC_REGNUM:
- result = !unw_access_ar(info, UNW_AR_LC, reg, rw);
- break;
- case IA64_EC_REGNUM:
- result = !unw_access_ar(info, UNW_AR_EC, reg, rw);
- break;
- case IA64_FPSR_REGNUM:
- result = !unw_access_ar(info, UNW_AR_FPSR, reg, rw);
- break;
- case IA64_RSC_REGNUM:
- result = !unw_access_ar(info, UNW_AR_RSC, reg, rw);
- break;
- case IA64_CCV_REGNUM:
- result = !unw_access_ar(info, UNW_AR_CCV, reg, rw);
- break;
- default:
- result = 0;
- }
-
- return result;
-}
-
-#ifndef XEN
-void kgdb_get_reg(char *outbuffer, int regnum, struct unw_frame_info *info,
- struct pt_regs *ptregs)
-#else
-static int
-kgdb_get_reg(int regnum, struct unw_frame_info *info,
- struct cpu_user_regs* ptregs,
- unsigned long* __reg, struct ia64_fpreg* __freg)
-#endif
-{
- unsigned long reg, size = 0, *mem = &reg;
- struct ia64_fpreg freg;
-
- if (kgdb_gr_reg(regnum, info, &reg, 0) ||
- kgdb_gr_ptreg(regnum, ptregs, info, &reg, 0) ||
- kgdb_br_reg(regnum, ptregs, info, &reg, 0) ||
- kgdb_ar_reg(regnum, ptregs, info, &reg, 0))
- size = sizeof(reg);
- else if (kgdb_fr_reg(regnum, NULL, ptregs, info, &reg, &freg, 0)) {
- size = sizeof(freg);
- mem = (unsigned long *)&freg;
- } else if (regnum == IA64_IP_REGNUM) {
- if (!ptregs) {
- unw_get_ip(info, &reg);
- size = sizeof(reg);
- } else {
- reg = ptregs->cr_iip;
- size = sizeof(reg);
- }
- } else if (regnum == IA64_CFM_REGNUM) {
- if (!ptregs)
- unw_get_cfm(info, &reg);
- else
- reg = ptregs->cr_ifs;
- size = sizeof(reg);
- } else if (regnum == IA64_PSR_REGNUM) {
-#ifndef XEN
- if (!ptregs && kgdb_usethread)
- ptregs = (struct pt_regs *)
- ((unsigned long)kgdb_usethread +
- IA64_STK_OFFSET) - 1;
-#endif
- if (ptregs)
- reg = ptregs->cr_ipsr;
- size = sizeof(reg);
- } else if (regnum == IA64_PR_REGNUM) {
- if (ptregs)
- reg = ptregs->pr;
- else
- unw_access_pr(info, &reg, 0);
- size = sizeof(reg);
- } else if (regnum == IA64_BSP_REGNUM) {
- unw_get_bsp(info, &reg);
- size = sizeof(reg);
- }
-
-#ifndef XEN
- if (size) {
- kgdb_mem2hex((char *) mem, outbuffer, size);
- outbuffer[size*2] = 0;
- }
- else
- strlcpy(outbuffer, "E0", sizeof("E0"));
-
- return;
-#else
- if (size) {
- if (size == sizeof(reg)) {
- *__reg = reg;
- } else {
- BUG_ON(size != sizeof(freg));
- *__freg = freg;
- }
- return 0;
- }
-
- return -1;
-#endif
-}
-
-#ifndef XEN
-static int inline kgdb_get_blocked_state(struct task_struct *p,
- struct unw_frame_info *unw)
-#else
-static int
-kgdb_get_blocked_state(struct vcpu *p,
- struct cpu_user_regs *regs,
- struct unw_frame_info *unw)
-#endif
-{
- unsigned long ip;
- int count = 0;
-
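- /* Unwind at most 16 frames, stopping (under Xen) at the frame
- * whose ip matches the interrupted cr_iip. */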
-#ifndef XEN
- unw_init_from_blocked_task(unw, p);
-#endif
- ip = 0UL;
- do {
- if (unw_unwind(unw) < 0)
- return -1;
- unw_get_ip(unw, &ip);
-#ifndef XEN
- if (!in_sched_functions(ip))
- break;
-#else
- dbg_printk("ip 0x%lx cr_iip 0x%lx\n", ip, regs->cr_iip);
- if (ip == regs->cr_iip)
- break;
-#endif
- } while (count++ < 16);
-
- if (!ip)
- return -1;
- else
- return 0;
-}
-
-struct gdb_callback_arg
-{
- struct cpu_user_regs* regs;
- unsigned long regnum;
- unsigned long* reg;
- struct pt_fpreg* freg;
-
- int error;
- // 1: not supported
- // 0: success
- // -1: failure
-};
-
-static void
-gdb_get_reg_callback(struct unw_frame_info* info, void* __arg)
-{
- struct gdb_callback_arg* arg = (struct gdb_callback_arg*)__arg;
-
- if (kgdb_get_blocked_state(current, arg->regs, info) < 0) {
- dbg_printk("%s: kgdb_get_blocked_state failed\n", __func__);
- arg->error = -1;
- return;
- }
- //XXX struct ia64_fpreg and struct pt_fpreg are the same.
- if (kgdb_get_reg(arg->regnum, info, arg->regs, arg->reg,
- (struct ia64_fpreg*)arg->freg) < 0) {
- dbg_printk("%s: kgdb_get_reg failed\n", __func__);
- arg->error = 1;
- return;
- }
- arg->error = 0;
- return;
-}
-
-void
-gdb_arch_read_reg(unsigned long regnum, struct cpu_user_regs *regs,
- struct gdb_context *ctx)
-{
- struct gdb_callback_arg arg;
- unsigned long reg;
- struct pt_fpreg freg;
- char buf[16 * 2 + 1];
-
- if (regnum >= NUM_REGS) {
- dbg_printk("%s: regnum %ld\n", __func__, regnum);
- goto out_err;
- }
-
- arg.regs = regs;
- arg.regnum = regnum;
- arg.reg = &reg;
- arg.freg = &freg;
- arg.error = 0;
- unw_init_running(&gdb_get_reg_callback, (void*)&arg);
- if (arg.error < 0) {
- dbg_printk("%s: gdb_get_reg_callback failed\n", __func__);
- goto out_err;
- }
-
- if (arg.error > 0) {
- // notify gdb that this register is not supported.
- // see fetch_register_using_p() in gdb/remote.c.
- safe_strcpy(buf, "x");
- } else if (IA64_FR0_REGNUM <= regnum && regnum <= IA64_FR0_REGNUM + 127) {
- snprintf(buf, sizeof(buf), "%.016lx", swab64(freg.u.bits[0]));
- snprintf(buf + 16, sizeof(buf) - 16, "%.016lx", swab64(freg.u.bits[1]));
- } else {
- snprintf(buf, sizeof(buf), "%.016lx", swab64(reg));
- }
-out:
- return gdb_send_reply(buf, ctx);
-
-out_err:
- dbg_printk("Register read unsupported regnum = 0x%lx\n", regnum);
- safe_strcpy(buf, "E0");
- goto out;
-}
-#endif
-
-void
-gdb_arch_resume(struct cpu_user_regs *regs,
- unsigned long addr, unsigned long type,
- struct gdb_context *ctx)
-{
- /* XXX */
- if (type == GDB_STEP) {
- gdb_send_reply("S01", ctx);
- }
-}
-
-void
-gdb_arch_print_state(struct cpu_user_regs *regs)
-{
- /* XXX */
-}
-
-void
-gdb_arch_enter(struct cpu_user_regs *regs)
-{
- /* nothing */
-}
-
-void
-gdb_arch_exit(struct cpu_user_regs *regs)
-{
- /* nothing */
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * End:
- */
diff --git a/xen/arch/ia64/xen/hpsimserial.c b/xen/arch/ia64/xen/hpsimserial.c
deleted file mode 100644
index 3d25d52c76..0000000000
--- a/xen/arch/ia64/xen/hpsimserial.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * HP Ski simulator serial I/O
- *
- * Copyright (C) 2004 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- */
-
-#include <linux/config.h>
-#include <xen/init.h>
-#include <xen/sched.h>
-#include <xen/serial.h>
-#include "hpsim_ssc.h"
-
-static void hp_ski_putc(struct serial_port *port, char c)
-{
- ia64_ssc(c,0,0,0,SSC_PUTCHAR);
-}
-
-static struct uart_driver hp_ski = { .putc = hp_ski_putc };
-
-void __init hpsim_serial_init(void)
-{
- serial_register_uart(0, &hp_ski, 0);
-}
diff --git a/xen/arch/ia64/xen/hypercall.c b/xen/arch/ia64/xen/hypercall.c
deleted file mode 100644
index 18930bfa50..0000000000
--- a/xen/arch/ia64/xen/hypercall.c
+++ /dev/null
@@ -1,825 +0,0 @@
-/*
- * Hypercall implementations
- *
- * Copyright (C) 2005 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- */
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <xen/hypercall.h>
-#include <xen/multicall.h>
-#include <xen/guest_access.h>
-#include <xen/mm.h>
-
-#include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
-#include <asm/sal.h> /* FOR struct ia64_sal_retval */
-#include <asm/fpswa.h> /* FOR struct fpswa_ret_t */
-
-#include <asm/vmx.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/vcpu.h>
-#include <asm/dom_fw.h>
-#include <public/domctl.h>
-#include <public/sysctl.h>
-#include <public/event_channel.h>
-#include <public/memory.h>
-#include <public/sched.h>
-#include <xen/irq.h>
-#include <asm/hw_irq.h>
-#include <public/physdev.h>
-#include <xen/domain.h>
-#include <public/callback.h>
-#include <xen/event.h>
-#include <xen/perfc.h>
-#include <public/arch-ia64/debug_op.h>
-#include <asm/sioemu.h>
-#include <public/arch-ia64/sioemu.h>
-#include <xen/pci.h>
-
-static IA64FAULT
-xen_hypercall (struct pt_regs *regs)
-{
- uint32_t cmd = (uint32_t)regs->r2;
- printk("Warning %s should not be called %d\n", __FUNCTION__, cmd);
- return IA64_NO_FAULT;
-}
-
-static IA64FAULT
-xen_fast_hypercall (struct pt_regs *regs)
-{
- uint32_t cmd = (uint32_t)regs->r2;
- switch (cmd) {
- case __HYPERVISOR_ia64_fast_eoi:
- printk("Warning %s should not be called %d\n",
- __FUNCTION__, cmd);
- break;
- default:
- regs->r8 = -ENOSYS;
- }
- return IA64_NO_FAULT;
-}
-
-static long __do_pirq_guest_eoi(struct domain *d, int pirq)
-{
- if ( pirq < 0 || pirq >= NR_IRQS )
- return -EINVAL;
- if ( d->arch.auto_unmask ) {
- spin_lock(&d->event_lock);
- evtchn_unmask(pirq_to_evtchn(d, pirq));
- spin_unlock(&d->event_lock);
- }
- pirq_guest_eoi(pirq_info(d, pirq));
- return 0;
-}
-
-long do_pirq_guest_eoi(int pirq)
-{
- return __do_pirq_guest_eoi(current->domain, pirq);
-}
-
-static void
-fw_hypercall_ipi (struct pt_regs *regs)
-{
- int cpu = regs->r14;
- int vector = regs->r15;
- struct vcpu *targ;
- struct domain *d = current->domain;
-
- /* Be sure the target exists. */
- if (cpu >= d->max_vcpus)
- return;
- targ = d->vcpu[cpu];
- if (targ == NULL)
- return;
-
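- /* The SAL boot rendezvous vector (re)starts a secondary vcpu;
- * any other vector is delivered as an ordinary guest IPI. */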
- if (vector == XEN_SAL_BOOT_RENDEZ_VEC
- && (!targ->is_initialised
- || test_bit(_VPF_down, &targ->pause_flags))) {
-
- /* First start: initialize vcpu. */
- if (!targ->is_initialised) {
- if (arch_set_info_guest (targ, NULL) != 0) {
- printk ("arch_boot_vcpu: failure\n");
- return;
- }
- }
-
- /* First or next rendez-vous: set registers. */
- vcpu_init_regs (targ);
- vcpu_regs (targ)->cr_iip = d->arch.sal_data->boot_rdv_ip;
- vcpu_regs (targ)->r1 = d->arch.sal_data->boot_rdv_r1;
- vcpu_regs (targ)->b0 = FW_HYPERCALL_SAL_RETURN_PADDR;
-
- if (test_and_clear_bit(_VPF_down,
- &targ->pause_flags)) {
- vcpu_wake(targ);
- printk(XENLOG_INFO "arch_boot_vcpu: vcpu %d awaken\n",
- targ->vcpu_id);
- }
- else
- printk ("arch_boot_vcpu: huu, already awaken!\n");
- }
- else {
- int running = targ->is_running;
- vcpu_pend_interrupt(targ, vector);
- vcpu_unblock(targ);
- if (running)
- smp_send_event_check_cpu(targ->processor);
- }
- return;
-}
-
-static int
-fpswa_get_domain_addr(struct vcpu *v, unsigned long gpaddr, size_t size,
- void **virt, struct page_info **page, const char *name)
-{
- int cross_page_boundary;
-
- if (gpaddr == 0) {
- *virt = 0;
- return 0;
- }
-
- cross_page_boundary = (((gpaddr & ~PAGE_MASK) + size) > PAGE_SIZE);
- if (unlikely(cross_page_boundary)) {
- /* this case isn't implemented */
- gdprintk(XENLOG_ERR,
- "%s: fpswa hypercall is called with "
- "page crossing argument %s 0x%lx\n",
- __func__, name, gpaddr);
- return -ENOSYS;
- }
-
-again:
- *virt = domain_mpa_to_imva(v->domain, gpaddr);
- *page = virt_to_page(*virt);
- if (get_page(*page, current->domain) == 0) {
- if (page_get_owner(*page) != current->domain) {
- *page = NULL;
- return -EFAULT;
- }
- goto again;
- }
-
- return 0;
-}
-
-static fpswa_ret_t
-fw_hypercall_fpswa (struct vcpu *v, struct pt_regs *regs)
-{
- fpswa_ret_t ret = {-1, 0, 0, 0};
- unsigned long bundle[2] = { regs->r15, regs->r16};
- fp_state_t fp_state;
- struct page_info *lp_page = NULL;
- struct page_info *lv_page = NULL;
- struct page_info *hp_page = NULL;
- struct page_info *hv_page = NULL;
- XEN_EFI_RR_DECLARE(rr6, rr7);
-
- if (unlikely(PSCBX(v, fpswa_ret).status != 0 &&
- PSCBX(v, fpswa_ret).status != IA64_RETRY)) {
- ret = PSCBX(v, fpswa_ret);
- PSCBX(v, fpswa_ret) = (fpswa_ret_t){0, 0, 0, 0};
- return ret;
- }
-
- if (!fpswa_interface)
- goto error;
-
- memset(&fp_state, 0, sizeof(fp_state));
- fp_state.bitmask_low64 = regs->r22;
- fp_state.bitmask_high64 = regs->r23;
-
- /* bit6..bit11 */
- if ((fp_state.bitmask_low64 & 0xfc0) != 0xfc0) {
- /* other cases aren't supported yet */
- gdprintk(XENLOG_ERR, "%s unsupported bitmask_low64 0x%lx\n",
- __func__, fp_state.bitmask_low64);
- goto error;
- }
- if (regs->r25 == 0)
- /* fp_state.fp_state_low_volatile must be supplied */
- goto error;
-
- /* eager save/lazy restore fpu: f32...f127 */
- if ((~fp_state.bitmask_low64 & ((1UL << 31) - 1)) != 0 ||
- ~fp_state.bitmask_high64 != 0) {
- if (VMX_DOMAIN(v))
- vmx_lazy_load_fpu(v);
- else
- ia64_lazy_load_fpu(v);
- }
-
- if (fpswa_get_domain_addr(v, regs->r24,
- sizeof(fp_state.fp_state_low_preserved),
- (void*)&fp_state.fp_state_low_preserved,
- &lp_page, "fp_state_low_preserved") < 0)
- goto error;
- if (fpswa_get_domain_addr(v, regs->r25,
- sizeof(fp_state.fp_state_low_volatile),
- (void*)&fp_state.fp_state_low_volatile,
- &lv_page, "fp_state_low_volatile") < 0)
- goto error;
- if (fpswa_get_domain_addr(v, regs->r26,
- sizeof(fp_state.fp_state_high_preserved),
- (void*)&fp_state.fp_state_high_preserved,
- &hp_page, "fp_state_low_preserved") < 0)
- goto error;
- if (fpswa_get_domain_addr(v, regs->r27,
- sizeof(fp_state.fp_state_high_volatile),
- (void*)&fp_state.fp_state_high_volatile,
- &hv_page, "fp_state_high_volatile") < 0)
- goto error;
-
- XEN_EFI_RR_ENTER(rr6, rr7);
- ret = (*fpswa_interface->fpswa)(regs->r14,
- bundle,
- &regs->r17, /* pipsr */
- &regs->r18, /* pfsr */
- &regs->r19, /* pisr */
- &regs->r20, /* ppreds */
- &regs->r21, /* pifs */
- &fp_state);
- XEN_EFI_RR_LEAVE(rr6, rr7);
-
-error:
- if (lp_page != NULL)
- put_page(lp_page);
- if (lv_page != NULL)
- put_page(lv_page);
- if (hp_page != NULL)
- put_page(hp_page);
- if (hv_page != NULL)
- put_page(hv_page);
- return ret;
-}
-
-static fpswa_ret_t
-fw_hypercall_fpswa_error(void)
-{
- return (fpswa_ret_t) {-1, 0, 0, 0};
-}
-
-IA64FAULT
-ia64_hypercall(struct pt_regs *regs)
-{
- struct vcpu *v = current;
- struct sal_ret_values x;
- efi_status_t efi_ret_value;
- fpswa_ret_t fpswa_ret;
- IA64FAULT fault;
- unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;
-
- perfc_incra(fw_hypercall, index >> 8);
- switch (index) {
- case FW_HYPERCALL_XEN:
- return xen_hypercall(regs);
-
- case FW_HYPERCALL_XEN_FAST:
- return xen_fast_hypercall(regs);
-
- case FW_HYPERCALL_PAL_CALL:
- //printk("*** PAL hypercall: index=%d\n",regs->r28);
- //FIXME: This should call a C routine
-#if 0
- // This is very conservative, but avoids a possible
- // (and deadly) freeze in paravirtualized domains due
- // to a yet-to-be-found bug where pending_interruption
- // is zero when it shouldn't be. Since PAL is called
- // in the idle loop, this should resolve it
- VCPU(v,pending_interruption) = 1;
-#endif
- if (regs->r28 == PAL_HALT_LIGHT) {
- if (vcpu_deliverable_interrupts(v) ||
- event_pending(v)) {
- perfc_incr(idle_when_pending);
- vcpu_pend_unspecified_interrupt(v);
-			/*
-			 * This shouldn't happen, but apparently does quite a
-			 * bit!  So don't allow it: if a domain has an
-			 * interrupt pending and it tries to halt itself
-			 * because it thinks it is idle, just return here, as
-			 * deliver_pending_interrupt is called on the way out
-			 * and will deliver the interrupt.
-			 */
- }
- else {
- perfc_incr(pal_halt_light);
- migrate_timer(&v->arch.hlt_timer,
- v->processor);
- set_timer(&v->arch.hlt_timer,
- vcpu_get_next_timer_ns(v));
- do_sched_op_compat(SCHEDOP_block, 0);
- /* do_block only pends a softirq */
- do_softirq();
- stop_timer(&v->arch.hlt_timer);
- /* do_block() calls
- * local_event_delivery_enable(),
- * but PAL CALL must be called with
- * psr.i = 0 and psr.i is unchanged.
- * SDM vol.2 Part I 11.10.2
- * PAL Calling Conventions.
- */
- local_event_delivery_disable();
- }
- regs->r8 = 0;
- regs->r9 = 0;
- regs->r10 = 0;
- regs->r11 = 0;
- }
- else {
- struct ia64_pal_retval y;
-
- if (regs->r28 >= PAL_COPY_PAL)
- y = xen_pal_emulator
- (regs->r28, vcpu_get_gr (v, 33),
- vcpu_get_gr (v, 34),
- vcpu_get_gr (v, 35));
- else
- y = xen_pal_emulator(regs->r28,regs->r29,
- regs->r30,regs->r31);
- regs->r8 = y.status; regs->r9 = y.v0;
- regs->r10 = y.v1; regs->r11 = y.v2;
- }
- break;
- case FW_HYPERCALL_SAL_CALL:
- x = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33),
- vcpu_get_gr(v,34),vcpu_get_gr(v,35),
- vcpu_get_gr(v,36),vcpu_get_gr(v,37),
- vcpu_get_gr(v,38),vcpu_get_gr(v,39));
- regs->r8 = x.r8; regs->r9 = x.r9;
- regs->r10 = x.r10; regs->r11 = x.r11;
- break;
- case FW_HYPERCALL_SAL_RETURN:
- if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
- vcpu_sleep_nosync(v);
- break;
- case FW_HYPERCALL_EFI_CALL:
- efi_ret_value = efi_emulator (regs, &fault);
- if (fault != IA64_NO_FAULT) return fault;
- regs->r8 = efi_ret_value;
- break;
- case FW_HYPERCALL_IPI:
- fw_hypercall_ipi (regs);
- break;
- case FW_HYPERCALL_SET_SHARED_INFO_VA:
- regs->r8 = domain_set_shared_info_va (regs->r28);
- break;
- case FW_HYPERCALL_FPSWA_BASE:
- switch (regs->r2) {
- case FW_HYPERCALL_FPSWA_BROKEN:
- gdprintk(XENLOG_WARNING,
- "Old fpswa hypercall was called (0x%lx).\n"
- "Please update your domain builder. ip 0x%lx\n",
- FW_HYPERCALL_FPSWA_BROKEN, regs->cr_iip);
- fpswa_ret = fw_hypercall_fpswa_error();
- break;
- case FW_HYPERCALL_FPSWA:
- fpswa_ret = fw_hypercall_fpswa(v, regs);
- break;
- default:
- gdprintk(XENLOG_ERR, "unknown fpswa hypercall %lx\n",
- regs->r2);
- fpswa_ret = fw_hypercall_fpswa_error();
- break;
- }
- regs->r8 = fpswa_ret.status;
- regs->r9 = fpswa_ret.err0;
- regs->r10 = fpswa_ret.err1;
- regs->r11 = fpswa_ret.err2;
- break;
- case __HYPERVISOR_opt_feature:
- {
- XEN_GUEST_HANDLE(void) arg;
- struct xen_ia64_opt_feature optf;
- set_xen_guest_handle(arg, (void*)(vcpu_get_gr(v, 32)));
- if (copy_from_guest(&optf, arg, 1) == 0)
- regs->r8 = domain_opt_feature(v->domain, &optf);
- else
- regs->r8 = -EFAULT;
- break;
- }
- case FW_HYPERCALL_SIOEMU:
- sioemu_hypercall(regs);
- break;
- default:
- printk("unknown ia64 fw hypercall %lx\n", regs->r2);
- regs->r8 = do_ni_hypercall();
- }
- return IA64_NO_FAULT;
-}
-
-#define next_arg(fmt, args) ({ \
- unsigned long __arg; \
- switch ( *(fmt)++ ) \
- { \
- case 'i': __arg = (unsigned long)va_arg(args, unsigned int); break; \
- case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break; \
- case 'h': __arg = (unsigned long)va_arg(args, void *); break; \
- default: __arg = 0; BUG(); \
- } \
- __arg; \
-})
-
-unsigned long hypercall_create_continuation(
- unsigned int op, const char *format, ...)
-{
- struct mc_state *mcs = &current->mc_state;
- struct vcpu *v = current;
- const char *p = format;
- unsigned long arg;
- unsigned int i;
- va_list args;
-
- va_start(args, format);
-
- if (test_bit(_MCSF_in_multicall, &mcs->flags)) {
-		dprintk(XENLOG_DEBUG, "PREEMPT happened in multicall\n");
- __set_bit(_MCSF_call_preempted, &mcs->flags);
- for (i = 0; *p != '\0'; i++)
- mcs->call.args[i] = next_arg(p, args);
- }
- else {
- vcpu_set_gr(v, 15, op, 0);
-
- for (i = 0; *p != '\0'; i++) {
- arg = next_arg(p, args);
- vcpu_set_gr(v, 16 + i, arg, 0);
- }
-
- if (i >= 6)
- panic("Too many args for hypercall continuation\n");
-
-		// Clear the remaining arguments to 0
- while (i < 6) {
- vcpu_set_gr(v, 16 + i, 0, 0);
- i++;
- }
-
-		// re-execute the break instruction
- vcpu_decrement_iip(v);
-
- v->arch.hypercall_continuation = 1;
- }
-
- va_end(args);
- return op;
-}
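-
-/*
- * Illustrative call site for the continuation mechanism above
- * (hypothetical, not from this file): a preemptible hypercall
- * re-queues itself, encoding each argument's type in the format
- * string ('i' = int, 'l' = long, 'h' = guest handle):
- *
- *	if (hypercall_preempt_check())
- *		return hypercall_create_continuation(
- *			__HYPERVISOR_memory_op, "lh", cmd, arg);
- *
- * Because vcpu_decrement_iip() rewinds to the break instruction, the
- * guest re-issues the hypercall with op in r15 and the updated
- * arguments in r16..r21.
- */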
-
-/* Need to make this function common */
-extern int
-iosapic_guest_read(
- unsigned long physbase, unsigned int reg, u32 *pval);
-extern int
-iosapic_guest_write(
- unsigned long physbase, unsigned int reg, u32 pval);
-
-
-/*
- * XXX: We don't support MSI for PCI passthrough at present, so the
- * following 2 functions are dummies for now.  They shouldn't return
- * -ENOSYS, because xend invokes them (their x86 counterparts are
- * necessary for x86 Xen); if they returned -ENOSYS, xend would refuse
- * to create an IPF HVM guest with devices assigned, so returning 0
- * here is fine.
- */
-static int physdev_map_pirq(struct physdev_map_pirq *map)
-{
- return 0;
-}
-
-static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
-{
- return 0;
-}
-
-
-long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
-{
- int irq;
- long ret;
-
- switch ( cmd )
- {
- case PHYSDEVOP_eoi: {
- struct physdev_eoi eoi;
- ret = -EFAULT;
- if ( copy_from_guest(&eoi, arg, 1) != 0 )
- break;
- ret = __do_pirq_guest_eoi(current->domain, eoi.irq);
- break;
- }
-
- case PHYSDEVOP_pirq_eoi_gmfn_v1:
- case PHYSDEVOP_pirq_eoi_gmfn_v2: {
- struct physdev_pirq_eoi_gmfn info;
- unsigned long mfn;
-
- BUILD_BUG_ON(NR_IRQS > (PAGE_SIZE * 8));
-
- ret = -EFAULT;
- if ( copy_from_guest(&info, arg, 1) != 0 )
- break;
-
- ret = -EINVAL;
- mfn = gmfn_to_mfn(current->domain, info.gmfn);
- if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), current->domain) )
- break;
-
- if ( cmpxchg(&current->domain->arch.pirq_eoi_map_mfn, 0, mfn) != 0 )
- {
- put_page(mfn_to_page(mfn));
- ret = -EBUSY;
- break;
- }
-
- current->domain->arch.pirq_eoi_map = mfn_to_virt(mfn);
- if ( cmd == PHYSDEVOP_pirq_eoi_gmfn_v1 )
- current->domain->arch.auto_unmask = 1;
- ret = 0;
- break;
- }
-
- /* Legacy since 0x00030202. */
- case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
- ret = pirq_guest_unmask(current->domain);
- break;
- }
-
- case PHYSDEVOP_irq_status_query: {
- struct physdev_irq_status_query irq_status_query;
- ret = -EFAULT;
- if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
- break;
- irq = irq_status_query.irq;
- ret = -EINVAL;
- if ( (irq < 0) || (irq >= NR_IRQS) )
- break;
- irq_status_query.flags = 0;
- /* Edge-triggered interrupts don't need an explicit unmask downcall. */
- if ( !strstr(irq_descp(irq)->handler->typename, "edge") )
- irq_status_query.flags |= XENIRQSTAT_needs_eoi;
- ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
- break;
- }
-
- case PHYSDEVOP_apic_read: {
- struct physdev_apic apic;
- ret = -EFAULT;
- if ( copy_from_guest(&apic, arg, 1) != 0 )
- break;
- ret = -EPERM;
- if ( !IS_PRIV(current->domain) )
- break;
- ret = iosapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
- if ( copy_to_guest(arg, &apic, 1) != 0 )
- ret = -EFAULT;
- break;
- }
-
- case PHYSDEVOP_apic_write: {
- struct physdev_apic apic;
- ret = -EFAULT;
- if ( copy_from_guest(&apic, arg, 1) != 0 )
- break;
- ret = -EPERM;
- if ( !IS_PRIV(current->domain) )
- break;
- ret = iosapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
- break;
- }
-
- case PHYSDEVOP_alloc_irq_vector: {
- struct physdev_irq irq_op;
-
- ret = -EFAULT;
- if ( copy_from_guest(&irq_op, arg, 1) != 0 )
- break;
-
- ret = -EPERM;
- if ( !IS_PRIV(current->domain) )
- break;
-
- ret = -EINVAL;
- if ( (irq = irq_op.irq) >= NR_IRQS )
- break;
-
- irq_op.vector = assign_irq_vector(irq);
- ret = copy_to_guest(arg, &irq_op, 1) ? -EFAULT : 0;
- break;
- }
-
- case PHYSDEVOP_free_irq_vector: {
- struct physdev_irq irq_op;
- int vector;
-
- ret = -EFAULT;
- if ( copy_from_guest(&irq_op, arg, 1) != 0 )
- break;
-
- ret = -EPERM;
- if ( !IS_PRIV(current->domain) )
- break;
-
- ret = -EINVAL;
- vector = irq_op.vector;
- if (vector < IA64_FIRST_DEVICE_VECTOR ||
- vector > IA64_LAST_DEVICE_VECTOR)
- break;
-
- /* XXX This should be called, but causes a NAT consumption via the
- * reboot notifier_call_chain in dom0 if a device is hidden for
- * a driver domain using pciback.hide= (specifically, hiding function
- * 1 of a 2 port e1000 card).
- * free_irq_vector(vector);
- */
- ret = 0;
- break;
- }
-
- case PHYSDEVOP_map_pirq: {
- struct physdev_map_pirq map;
-
- ret = -EFAULT;
- if ( copy_from_guest(&map, arg, 1) != 0 )
- break;
-
- ret = physdev_map_pirq(&map);
-
- if ( copy_to_guest(arg, &map, 1) != 0 )
- ret = -EFAULT;
- break;
- }
-
- case PHYSDEVOP_unmap_pirq: {
- struct physdev_unmap_pirq unmap;
-
- ret = -EFAULT;
- if ( copy_from_guest(&unmap, arg, 1) != 0 )
- break;
-
- ret = physdev_unmap_pirq(&unmap);
- break;
- }
-
- case PHYSDEVOP_manage_pci_add: {
- struct physdev_manage_pci manage_pci;
- ret = -EPERM;
- if ( !IS_PRIV(current->domain) )
- break;
- ret = -EFAULT;
- if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
- break;
-
- ret = pci_add_device(0, manage_pci.bus, manage_pci.devfn, NULL);
- break;
- }
-
- case PHYSDEVOP_manage_pci_remove: {
- struct physdev_manage_pci manage_pci;
- ret = -EPERM;
- if ( !IS_PRIV(current->domain) )
- break;
- ret = -EFAULT;
- if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
- break;
-
- ret = pci_remove_device(0, manage_pci.bus, manage_pci.devfn);
- break;
- }
-
- case PHYSDEVOP_manage_pci_add_ext: {
- struct physdev_manage_pci_ext manage_pci_ext;
- struct pci_dev_info pdev_info;
-
- ret = -EPERM;
- if ( !IS_PRIV(current->domain) )
- break;
-
- ret = -EFAULT;
- if ( copy_from_guest(&manage_pci_ext, arg, 1) != 0 )
- break;
-
- pdev_info.is_extfn = !!manage_pci_ext.is_extfn;
- pdev_info.is_virtfn = !!manage_pci_ext.is_virtfn;
- pdev_info.physfn.bus = manage_pci_ext.physfn.bus;
- pdev_info.physfn.devfn = manage_pci_ext.physfn.devfn;
- ret = pci_add_device(0, manage_pci_ext.bus,
- manage_pci_ext.devfn,
- &pdev_info);
- break;
- }
-
- default:
- ret = -ENOSYS;
-        printk("do_physdev_op: unimplemented cmd: %d\n", cmd);
- break;
- }
-
- return ret;
-}
-
-static long register_guest_callback(struct callback_register *reg)
-{
- long ret = 0;
- struct vcpu *v = current;
-
- if (IS_VMM_ADDRESS(reg->address))
- return -EINVAL;
-
- switch ( reg->type )
- {
- case CALLBACKTYPE_event:
- v->arch.event_callback_ip = reg->address;
- break;
-
- case CALLBACKTYPE_failsafe:
- v->arch.failsafe_callback_ip = reg->address;
- break;
-
- default:
- ret = -ENOSYS;
- break;
- }
-
- return ret;
-}
-
-static long unregister_guest_callback(struct callback_unregister *unreg)
-{
- return -EINVAL;
-}
-
-/* This is the first callback support added to xen/ia64, so let's just
- * stick to the newer callback interface.
- */
-long do_callback_op(int cmd, XEN_GUEST_HANDLE(const_void) arg)
-{
- long ret;
-
- switch ( cmd )
- {
- case CALLBACKOP_register:
- {
- struct callback_register reg;
-
- ret = -EFAULT;
- if ( copy_from_guest(&reg, arg, 1) )
- break;
-
- ret = register_guest_callback(&reg);
- }
- break;
-
- case CALLBACKOP_unregister:
- {
- struct callback_unregister unreg;
-
- ret = -EFAULT;
- if ( copy_from_guest(&unreg, arg, 1) )
- break;
-
- ret = unregister_guest_callback(&unreg);
- }
- break;
-
- default:
- ret = -ENOSYS;
- break;
- }
-
- return ret;
-}
-
-unsigned long
-do_ia64_debug_op(unsigned long cmd, unsigned long domain,
- XEN_GUEST_HANDLE(xen_ia64_debug_op_t) u_debug_op)
-{
- xen_ia64_debug_op_t curop, *op = &curop;
- struct domain *d;
- long ret = 0;
-
- if (copy_from_guest(op, u_debug_op, 1))
- return -EFAULT;
- d = rcu_lock_domain_by_id(domain);
- if (d == NULL)
- return -ESRCH;
- if (!IS_PRIV_FOR(current->domain, d)) {
- ret = -EPERM;
- goto out;
- }
-
- switch (cmd) {
- case XEN_IA64_DEBUG_OP_SET_FLAGS:
- d->arch.debug_flags = op->flags;
- break;
- case XEN_IA64_DEBUG_OP_GET_FLAGS:
- op->flags = d->arch.debug_flags;
- if (copy_to_guest(u_debug_op, op, 1))
- ret = -EFAULT;
- break;
- default:
- ret = -ENOSYS;
- }
-out:
- rcu_unlock_domain(d);
- return ret;
-}
diff --git a/xen/arch/ia64/xen/hyperprivop.S b/xen/arch/ia64/xen/hyperprivop.S
deleted file mode 100644
index 44cbe084ce..0000000000
--- a/xen/arch/ia64/xen/hyperprivop.S
+++ /dev/null
@@ -1,2225 +0,0 @@
-/*
- * arch/ia64/kernel/hyperprivop.S
- *
- * Copyright (C) 2005 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- */
-
-#include <linux/config.h>
-
-#include <asm/asmmacro.h>
-#include <asm/kregs.h>
-#include <asm/offsets.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/debugger.h>
-#include <asm/asm-xsi-offsets.h>
-#include <asm/pgtable.h>
-#include <asm/vmmu.h>
-#include <public/xen.h>
-
-#ifdef PERF_COUNTERS
-#define PERFC(n) (THIS_CPU(perfcounters) + (IA64_PERFC_ ## n) * 4)
-#endif
-
-#define PAGE_PHYS (__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RWX)
-
-#if 1 // change to 0 to turn off all fast paths
-# define FAST_HYPERPRIVOPS
-# ifdef PERF_COUNTERS
-# define FAST_HYPERPRIVOP_CNT
-# define FAST_HYPERPRIVOP_PERFC(N) PERFC(fast_hyperprivop + N)
-# define FAST_REFLECT_CNT
-# endif
-
-//#define FAST_TICK // mostly working (unat problems) but default off for now
-//#define FAST_TLB_MISS_REFLECT // mostly working but default off for now
-# undef FAST_ITC //XXX TODO fast_itc doesn't support dom0 vp yet
-# define FAST_BREAK
-# undef FAST_ACCESS_REFLECT //XXX TODO fast_access_reflect
- // doesn't support dom0 vp yet.
-# define FAST_RFI
-// TODO: Since we use callback to deliver interrupt,
-// FAST_SSM_I needs to be rewritten.
-# define FAST_SSM_I
-# define FAST_PTC_GA
-# undef RFI_TO_INTERRUPT // not working yet
-# define FAST_SET_RR0_TO_RR4
-#endif
-
-#ifdef CONFIG_SMP
- //#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
- #undef FAST_PTC_GA
-#endif
-
-// FIXME: turn off for now... but NaTs may crash Xen so re-enable soon!
-#define HANDLE_AR_UNAT
-
-// FIXME: This is defined in include/asm-ia64/hw_irq.h, but that header
-// doesn't appear to be includable from assembly.
-#define IA64_TIMER_VECTOR 0xef
-
-// Note: not hand-scheduled for now
-// Registers at entry
-// r16 == cr.isr
-// r17 == cr.iim
-// r18 == XSI_PSR_IC_OFS
-// r19 == ipsr.cpl
-// r31 == pr
-GLOBAL_ENTRY(fast_hyperprivop)
- adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
- // HYPERPRIVOP_SSM_I?
- // assumes domain interrupts pending, so just do it
- cmp.eq p7,p6=HYPERPRIVOP_SSM_I,r17
-(p7) br.sptk.many hyper_ssm_i;;
-
- // Check pending event indication
- ld8 r20=[r20] // interrupt_mask_addr
- ;;
- ld1 r22=[r20],-1 // evtchn_upcall_mask
- ;;
- ld1 r20=[r20] // evtchn_upcall_pending
-
- // HYPERPRIVOP_RFI?
- cmp.eq p7,p6=HYPERPRIVOP_RFI,r17
-(p7) br.sptk.many hyper_rfi
- ;;
-#ifndef FAST_HYPERPRIVOPS // see beginning of file
- br.sptk.many dispatch_break_fault ;;
-#endif
- // if event enabled and there are pending events
- cmp.ne p7,p0=r20,r0
- ;;
- cmp.eq.and p7,p0=r22,r0
-(p7) br.spnt.many dispatch_break_fault
- ;;
-
- // HYPERPRIVOP_COVER?
- cmp.eq p7,p0=HYPERPRIVOP_COVER,r17
-(p7) br.sptk.many hyper_cover
- ;;
-
- // HYPERPRIVOP_SSM_DT?
- cmp.eq p7,p0=HYPERPRIVOP_SSM_DT,r17
-(p7) br.sptk.many hyper_ssm_dt
- ;;
-
- // HYPERPRIVOP_RSM_DT?
- cmp.eq p7,p0=HYPERPRIVOP_RSM_DT,r17
-(p7) br.sptk.many hyper_rsm_dt
- ;;
-
- // HYPERPRIVOP_SET_ITM?
- cmp.eq p7,p0=HYPERPRIVOP_SET_ITM,r17
-(p7) br.sptk.many hyper_set_itm
- ;;
-
- // HYPERPRIVOP_SET_RR0_TO_RR4?
- cmp.eq p7,p0=HYPERPRIVOP_SET_RR0_TO_RR4,r17
-(p7) br.sptk.many hyper_set_rr0_to_rr4
- ;;
-
- // HYPERPRIVOP_SET_RR?
- cmp.eq p7,p0=HYPERPRIVOP_SET_RR,r17
-(p7) br.sptk.many hyper_set_rr
- ;;
-
- // HYPERPRIVOP_GET_RR?
- cmp.eq p7,p0=HYPERPRIVOP_GET_RR,r17
-(p7) br.sptk.many hyper_get_rr
- ;;
-
- // HYPERPRIVOP_GET_PSR?
- cmp.eq p7,p0=HYPERPRIVOP_GET_PSR,r17
-(p7) br.sptk.many hyper_get_psr
- ;;
-
- // HYPERPRIVOP_PTC_GA?
- cmp.eq p7,p0=HYPERPRIVOP_PTC_GA,r17
-(p7) br.sptk.many hyper_ptc_ga
- ;;
-
- // HYPERPRIVOP_ITC_D?
- cmp.eq p7,p0=HYPERPRIVOP_ITC_D,r17
-(p7) br.sptk.many hyper_itc_d
- ;;
-
- // HYPERPRIVOP_ITC_I?
- cmp.eq p7,p0=HYPERPRIVOP_ITC_I,r17
-(p7) br.sptk.many hyper_itc_i
- ;;
-
- // HYPERPRIVOP_THASH?
- cmp.eq p7,p0=HYPERPRIVOP_THASH,r17
-(p7) br.sptk.many hyper_thash
- ;;
-
- // HYPERPRIVOP_SET_KR?
- cmp.eq p7,p0=HYPERPRIVOP_SET_KR,r17
-(p7) br.sptk.many hyper_set_kr
- ;;
-
- // if not one of the above, give up for now and do it the slow way
- br.sptk.many dispatch_break_fault
- ;;
-END(fast_hyperprivop)
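-
-// The compare chain above is just an unrolled dispatch table.  As a C
-// sketch (hypothetical rendering, not part of this file) it would read:
-//
-//	switch (iim /* r17, the hyperprivop number */) {
-//	case HYPERPRIVOP_SSM_I:	return hyper_ssm_i();
-//	case HYPERPRIVOP_RFI:	return hyper_rfi();
-//	/* ... one case per fast path above ... */
-//	default:		return dispatch_break_fault();
-//	}
-//
-// SSM_I and RFI are tested first, presumably because they are the most
-// frequent hyperprivops.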
-
-// give up for now if: ipsr.be==1, ipsr.pp==1
-// from reflect_interruption, don't need to:
-// - printk first extint (debug only)
-// - check for interrupt collection enabled (routine will force on)
-// - set ifa (not valid for extint)
-// - set iha (not valid for extint)
-// - set itir (not valid for extint)
-// DO need to
-// - increment the HYPER_SSM_I fast_hyperprivop counter
-// - set shared_mem iip to instruction after HYPER_SSM_I
-// - set cr.iip to guest iva+0x3000
-// - set shared_mem ipsr to [vcpu_get_ipsr_int_state]
-// be = pp = bn = 0; dt = it = rt = 1; cpl = 3 or 0;
-// i = shared_mem interrupt_delivery_enabled
-// ic = shared_mem interrupt_collection_enabled
-// ri = instruction after HYPER_SSM_I
-// all other bits unchanged from real cr.ipsr
-// - set cr.ipsr (DELIVER_PSR_SET/CLEAR, don't forget cpl!)
-// - set shared_mem isr: isr.ei to instr following HYPER_SSM_I
-// and isr.ri to cr.isr.ri (all other bits zero)
-// - cover and set shared_mem precover_ifs to cr.ifs
-// ^^^ MISSED THIS FOR fast_break??
-// - set shared_mem interrupt_delivery_enabled to 0
-// - set shared_mem interrupt_collection_enabled to 0
-// - set r31 to SHAREDINFO_ADDR
-// - virtual bank switch 0
-// maybe implement later
-// - verify that there really IS a deliverable interrupt pending
-// - set shared_mem iva
-// needs to be done but not implemented (in reflect_interruption)
-// - set shared_mem iipa
-// don't know for sure
-// - set shared_mem unat
-// r16 == cr.isr
-// r17 == cr.iim
-// r18 == XSI_PSR_IC
-// r19 == ipsr.cpl
-// r31 == pr
-ENTRY(hyper_ssm_i)
-#ifndef FAST_SSM_I
- br.spnt.few dispatch_break_fault ;;
-#endif
- // give up for now if: ipsr.be==1, ipsr.pp==1
- mov r30=cr.ipsr
- mov r29=cr.iip;;
- tbit.nz p7,p0=r30,IA64_PSR_PP_BIT
-(p7) br.spnt.many dispatch_break_fault ;;
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SSM_I);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- // set shared_mem iip to instruction after HYPER_SSM_I
- tbit.nz p6,p7=r30,IA64_PSR_RI_BIT+1 ;; // cr.ipsr.ri >= 2 ?
-(p6) mov r20=0
-(p6) adds r29=16,r29
-(p7) adds r20=1,r20 ;;
- dep r30=r20,r30,IA64_PSR_RI_BIT,2 // adjust cr.ipsr.ri but don't save yet
- adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r29 ;;
- // set shared_mem isr
- extr.u r16=r16,IA64_ISR_IR_BIT,1;; // grab cr.isr.ir bit
- dep r16=r16,r0,IA64_ISR_IR_BIT,1;; // insert into cr.isr (rest of bits zero)
- dep r16=r20,r16,IA64_PSR_RI_BIT,2 // deposit cr.isr.ri
- adds r21=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r16
- // set cr.ipsr
- mov r29=r30
- movl r28=DELIVER_PSR_SET
- movl r27=~(DELIVER_PSR_CLR & (~IA64_PSR_CPL));;
- and r29=r29,r27;;
- or r29=r29,r28;;
- // set hpsr_dfh to ipsr
- adds r28=XSI_HPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
- ld1 r28=[r28];;
- dep r29=r28,r29,IA64_PSR_DFH_BIT,1;;
- mov cr.ipsr=r29;;
- // set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
- extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
- cmp.eq p7,p0=CONFIG_CPL0_EMUL,r29;;
-(p7) dep r30=0,r30,IA64_PSR_CPL0_BIT,2
- // FOR SSM_I ONLY, also turn on psr.i and psr.ic
- movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT|IA64_PSR_I|IA64_PSR_IC)
-// movl r27=~(IA64_PSR_BE|IA64_PSR_PP|IA64_PSR_BN);;
- movl r27=~IA64_PSR_BN;;
- or r30=r30,r28;;
- and r30=r30,r27;;
- mov r20=1
- movl r22=THIS_CPU(current_psr_i_addr)
- adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r22=[r22]
- adds r27=XSI_VPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
- ld1 r28=[r27];;
- st1 [r27]=r0
- dep r30=r28,r30,IA64_PSR_DFH_BIT,1
- ;;
- st8 [r21]=r30;;
- // set shared_mem interrupt_delivery_enabled to 0
- // set shared_mem interrupt_collection_enabled to 0
- st1 [r22]=r20
- st4 [r18]=r0
- // cover and set shared_mem precover_ifs to cr.ifs
- // set shared_mem ifs to 0
- cover ;;
- mov r20=cr.ifs
- adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r0 ;;
- adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r20 ;;
- // leave cr.ifs alone for later rfi
- // set iip to go to event callback handler
- movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r22=[r22];;
- adds r22=IA64_VCPU_EVENT_CALLBACK_IP_OFFSET,r22;;
- ld8 r24=[r22];;
- mov cr.iip=r24;;
- // OK, now all set to go except for switch to virtual bank0
- mov r30=r2
- mov r29=r3
- ;;
- adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18
- adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18
- // temporarily save ar.unat
- mov r28=ar.unat
- bsw.1;;
- // FIXME?: ar.unat is not really handled correctly,
- // but may not matter if the OS is NaT-clean
- .mem.offset 0,0; st8.spill [r2]=r16,16
- .mem.offset 8,0; st8.spill [r3]=r17,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r18,16
- .mem.offset 8,0; st8.spill [r3]=r19,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r20,16
- .mem.offset 8,0; st8.spill [r3]=r21,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r22,16
- .mem.offset 8,0; st8.spill [r3]=r23,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r24,16
- .mem.offset 8,0; st8.spill [r3]=r25,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r26,16
- .mem.offset 8,0; st8.spill [r3]=r27,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r28,16
- .mem.offset 8,0; st8.spill [r3]=r29,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r30,16
- .mem.offset 8,0; st8.spill [r3]=r31,16 ;;
- bsw.0 ;;
- mov r27=ar.unat
- adds r26=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18 ;;
- //save bank1 ar.unat
- st8 [r26]=r27
- //restore ar.unat
- mov ar.unat=r28
- mov r2=r30
- mov r3=r29
- adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
- st4 [r20]=r0
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_ssm_i)
-
-// reflect domain clock interrupt
-// r31 == pr
-// r30 == cr.ivr
-// r29 == rp
-GLOBAL_ENTRY(fast_tick_reflect)
-#ifndef FAST_TICK // see beginning of file
- br.cond.sptk.many rp;;
-#endif
- mov r28=IA64_TIMER_VECTOR;;
- cmp.ne p6,p0=r28,r30
-(p6) br.cond.spnt.few rp;;
- movl r20=THIS_CPU(cpu_info)+IA64_CPUINFO_ITM_NEXT_OFFSET;;
- ld8 r26=[r20]
- mov r27=ar.itc;;
- adds r27=200,r27;; // safety margin
- cmp.ltu p6,p0=r26,r27
-(p6) br.cond.spnt.few rp;;
- mov r17=cr.ipsr;;
- // slow path if: ipsr.pp==1
- tbit.nz p6,p0=r17,IA64_PSR_PP_BIT
-(p6) br.cond.spnt.few rp;;
- // definitely have a domain tick
- mov cr.eoi=r0
- mov rp=r29
- mov cr.itm=r26 // ensure next tick
-#ifdef FAST_REFLECT_CNT
- movl r20=PERFC(fast_reflect + (0x3000>>8));;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- // vcpu_pend_timer(current)
- movl r18=THIS_CPU(current_psr_ic_addr)
- ;;
- ld8 r18=[r18]
- ;;
- adds r20=XSI_ITV_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r20=[r20];;
- cmp.eq p6,p0=r20,r0 // if cr.itv==0 done
-(p6) br.cond.spnt.few fast_tick_reflect_done;;
- tbit.nz p6,p0=r20,16;; // check itv.m (discard) bit
-(p6) br.cond.spnt.few fast_tick_reflect_done;;
- extr.u r27=r20,0,6 // r27 has low 6 bits of itv.vector
- extr.u r26=r20,6,2 // r26 has irr index of itv.vector
- movl r19=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r19=[r19];;
- adds r22=IA64_VCPU_DOMAIN_ITM_LAST_OFFSET,r19
- adds r23=IA64_VCPU_DOMAIN_ITM_OFFSET,r19;;
- ld8 r24=[r22]
- ld8 r23=[r23];;
- cmp.eq p6,p0=r23,r24 // skip if this tick already delivered
-(p6) br.cond.spnt.few fast_tick_reflect_done;;
- // set irr bit
- adds r21=IA64_VCPU_IRR0_OFFSET,r19
- shl r26=r26,3;;
- add r21=r21,r26
- mov r25=1;;
- shl r22=r25,r27
- ld8 r23=[r21];;
- or r22=r22,r23;;
- st8 [r21]=r22
- // set evtchn_upcall_pending!
- adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18;;
- ld8 r20=[r20];;
- adds r20=-1,r20;; // evtchn_upcall_pending
- st1 [r20]=r25
- // if interrupted at pl0, we're done
- extr.u r16=r17,IA64_PSR_CPL0_BIT,2;;
- cmp.eq p6,p0=r16,r0;;
-(p6) br.cond.spnt.few fast_tick_reflect_done;;
- // if guest vpsr.i is off, we're done
- movl r21=THIS_CPU(current_psr_i_addr);;
- ld8 r21=[r21];;
- ld1 r21=[r21];;
- cmp.eq p0,p6=r21,r0
-(p6) br.cond.spnt.few fast_tick_reflect_done;;
-
- // OK, we have a clock tick to deliver to the active domain!
- // so deliver to iva+0x3000
- // r17 == cr.ipsr
- // r18 == XSI_PSR_IC
- // r19 == IA64_KR(CURRENT)
- // r31 == pr
- mov r16=cr.isr
- mov r29=cr.iip
- adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r29
- // set shared_mem isr
- extr.u r16=r16,IA64_ISR_IR_BIT,1;; // grab cr.isr.ir bit
- dep r16=r16,r0,IA64_ISR_IR_BIT,1 // insert into cr.isr (rest of bits zero)
- extr.u r20=r17,IA64_PSR_RI_BIT,2;; // get ipsr.ri
- dep r16=r20,r16,IA64_PSR_RI_BIT,2 // deposit cr.isr.ei
- adds r21=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18;;
- st8 [r21]=r16
- // set cr.ipsr (make sure cpl==2!)
- mov r29=r17
- movl r28=DELIVER_PSR_SET | (CONFIG_CPL0_EMUL << IA64_PSR_CPL0_BIT)
- movl r27=~DELIVER_PSR_CLR;;
- and r29=r29,r27;;
- or r29=r29,r28;;
- mov cr.ipsr=r29;;
- // set shared_mem ipsr (from ipsr in r17 with ipsr.ri already set)
- extr.u r29=r17,IA64_PSR_CPL0_BIT,2;;
- cmp.eq p7,p0=CONFIG_CPL0_EMUL,r29;;
-(p7) dep r17=0,r17,IA64_PSR_CPL0_BIT,2
- movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT)
- movl r27=~(IA64_PSR_PP|IA64_PSR_BN|IA64_PSR_I|IA64_PSR_IC);;
- or r17=r17,r28;;
- and r17=r17,r27
- ld4 r16=[r18];;
- cmp.ne p6,p0=r16,r0
- movl r22=THIS_CPU(current_psr_i_addr);;
- ld8 r22=[r22]
-(p6) dep r17=-1,r17,IA64_PSR_IC_BIT,1 ;;
- ld1 r16=[r22];;
- cmp.eq p6,p0=r16,r0;;
-(p6) dep r17=-1,r17,IA64_PSR_I_BIT,1
- mov r20=1
- adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18;;
- st8 [r21]=r17
- // set shared_mem interrupt_delivery_enabled to 0
- // set shared_mem interrupt_collection_enabled to 0
- st1 [r22]=r20
- st4 [r18]=r0;;
- // cover and set shared_mem precover_ifs to cr.ifs
- // set shared_mem ifs to 0
- cover ;;
- mov r20=cr.ifs
- adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r0 ;;
- adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r20
- // leave cr.ifs alone for later rfi
- // set iip to go to domain IVA break instruction vector
- adds r22=IA64_VCPU_IVA_OFFSET,r19;;
- ld8 r23=[r22]
- movl r24=0x3000;;
- add r24=r24,r23;;
- mov cr.iip=r24
- // OK, now all set to go except for switch to virtual bank0
- mov r30=r2
- mov r29=r3
-#ifdef HANDLE_AR_UNAT
- mov r28=ar.unat
-#endif
- ;;
- adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18
- adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18
- ;;
- bsw.1;;
- .mem.offset 0,0; st8.spill [r2]=r16,16
- .mem.offset 8,0; st8.spill [r3]=r17,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r18,16
- .mem.offset 8,0; st8.spill [r3]=r19,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r20,16
- .mem.offset 8,0; st8.spill [r3]=r21,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r22,16
- .mem.offset 8,0; st8.spill [r3]=r23,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r24,16
- .mem.offset 8,0; st8.spill [r3]=r25,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r26,16
- .mem.offset 8,0; st8.spill [r3]=r27,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r28,16
- .mem.offset 8,0; st8.spill [r3]=r29,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r30,16
- .mem.offset 8,0; st8.spill [r3]=r31,16 ;;
-#ifdef HANDLE_AR_UNAT
-	// r16~r23 are preserved regs in bank0 regs, we need to restore them;
-	// r24~r31 are scratch regs, we don't need to handle the NaT bit,
-	// because the OS handler must assign it before accessing it
- ld8 r16=[r2],16
- ld8 r17=[r3],16;;
- ld8 r18=[r2],16
- ld8 r19=[r3],16;;
- ld8 r20=[r2],16
- ld8 r21=[r3],16;;
- ld8 r22=[r2],16
- ld8 r23=[r3],16;;
-#endif
- ;;
- bsw.0 ;;
- mov r24=ar.unat
- mov r2=r30
- mov r3=r29
-#ifdef HANDLE_AR_UNAT
- mov ar.unat=r28
-#endif
- ;;
- adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18
- adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r25]=r24
- st4 [r20]=r0
-fast_tick_reflect_done:
- mov pr=r31,-1 ;;
- rfi
-END(fast_tick_reflect)
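-
-// The vector-to-irr arithmetic in fast_tick_reflect above, as a C
-// sketch (hypothetical helper; assumes the irr is four consecutive
-// 64-bit words starting at IA64_VCPU_IRR0_OFFSET):
-//
-//	static void vcpu_pend_vector(unsigned long irr[4], unsigned int vec)
-//	{
-//		/* bits 6..7 pick the word, bits 0..5 pick the bit */
-//		irr[(vec >> 6) & 3] |= 1UL << (vec & 63);
-//	}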
-
-// reflect domain breaks directly to domain
-// r16 == cr.isr
-// r17 == cr.iim
-// r18 == XSI_PSR_IC
-// r19 == ipsr.cpl
-// r31 == pr
-GLOBAL_ENTRY(fast_break_reflect)
-#ifndef FAST_BREAK // see beginning of file
- br.sptk.many dispatch_break_fault ;;
-#endif
- mov r30=cr.ipsr
- mov r29=cr.iip;;
- tbit.nz p7,p0=r30,IA64_PSR_PP_BIT
-(p7) br.spnt.few dispatch_break_fault ;;
- movl r20=IA64_PSR_CPL ;;
- and r22=r20,r30 ;;
- cmp.ne p7,p0=r22,r0
-(p7) br.spnt.many 1f ;;
- cmp.eq p7,p0=r17,r0
-(p7) br.spnt.few dispatch_break_fault ;;
-#ifdef CRASH_DEBUG
- movl r21=CDB_BREAK_NUM ;;
- cmp.eq p7,p0=r17,r21
-(p7) br.spnt.few dispatch_break_fault ;;
-#endif
-1:
-#if 1 /* special handling in case running on simulator */
- movl r20=first_break;;
- ld4 r23=[r20]
- movl r21=0x80001
- movl r22=0x80002;;
- cmp.ne p7,p0=r23,r0
-(p7) br.spnt.few dispatch_break_fault ;;
- cmp.eq p7,p0=r21,r17
-(p7) br.spnt.few dispatch_break_fault ;;
- cmp.eq p7,p0=r22,r17
-(p7) br.spnt.few dispatch_break_fault ;;
-#endif
- movl r20=0x2c00
- // save iim in shared_info
- adds r21=XSI_IIM_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r17;;
- // fall through
-END(fast_break_reflect)
-
-// reflect to domain ivt+r20
-// sets up isr,iip,ipsr,ifs (FIXME: do iipa too)
-// r16 == cr.isr
-// r18 == XSI_PSR_IC
-// r20 == offset into ivt
-// r29 == iip
-// r30 == ipsr
-// r31 == pr
-ENTRY(fast_reflect)
-#ifdef FAST_REFLECT_CNT
- movl r22=PERFC(fast_reflect)
- shr r23=r20,8-2;;
- add r22=r22,r23;;
- ld4 r21=[r22];;
- adds r21=1,r21;;
- st4 [r22]=r21;;
-#endif
- // save iip in shared_info (DON'T POINT TO NEXT INSTRUCTION!)
- adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r29,XSI_ISR_OFS-XSI_IIP_OFS;;
- // set shared_mem isr
- st8 [r21]=r16 ;;
- // set cr.ipsr
- movl r21=THIS_CPU(current_psr_i_addr)
- mov r29=r30 ;;
- ld8 r21=[r21]
- movl r28=DELIVER_PSR_SET | (CONFIG_CPL0_EMUL << IA64_PSR_CPL0_BIT)
- movl r27=~DELIVER_PSR_CLR;;
- and r29=r29,r27;;
- or r29=r29,r28;;
- // set hpsr_dfh to ipsr
- adds r28=XSI_HPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
- ld1 r28=[r28];;
- dep r29=r28,r29,IA64_PSR_DFH_BIT,1;;
- mov cr.ipsr=r29;;
- // set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
- extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
- cmp.eq p7,p0=CONFIG_CPL0_EMUL,r29;;
-(p7) dep r30=0,r30,IA64_PSR_CPL0_BIT,2
- movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT)
- movl r27=~(IA64_PSR_PP|IA64_PSR_BN);;
- or r30=r30,r28;;
- and r30=r30,r27
- // also set shared_mem ipsr.i and ipsr.ic appropriately
- ld1 r22=[r21]
- ld4 r24=[r18];;
- cmp4.eq p6,p7=r24,r0;;
-(p6) dep r30=0,r30,IA64_PSR_IC_BIT,1
-(p7) dep r30=-1,r30,IA64_PSR_IC_BIT,1
- mov r24=r21
- cmp.ne p6,p7=r22,r0;;
-(p6) dep r30=0,r30,IA64_PSR_I_BIT,1
-(p7) dep r30=-1,r30,IA64_PSR_I_BIT,1
- mov r22=1
- adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
- adds r27=XSI_VPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
- ld1 r28=[r27];;
- st1 [r27]=r0
- dep r30=r28,r30,IA64_PSR_DFH_BIT,1
- ;;
- st8 [r21]=r30
- // set shared_mem interrupt_delivery_enabled to 0
- // set shared_mem interrupt_collection_enabled to 0
- st1 [r24]=r22
- st4 [r18]=r0;;
- // cover and set shared_mem precover_ifs to cr.ifs
- // set shared_mem ifs to 0
- cover ;;
- mov r24=cr.ifs
- adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r0 ;;
- adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r24
- // FIXME: need to save iipa and isr to be arch-compliant
- // set iip to go to domain IVA break instruction vector
- movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r22=[r22];;
- adds r22=IA64_VCPU_IVA_OFFSET,r22;;
- ld8 r23=[r22];;
- add r20=r20,r23;;
- mov cr.iip=r20
- // OK, now all set to go except for switch to virtual bank0
- mov r30=r2
- mov r29=r3
-#ifdef HANDLE_AR_UNAT
- mov r28=ar.unat
-#endif
- ;;
- adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18
- adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18
- ;;
- bsw.1;;
- .mem.offset 0,0; st8.spill [r2]=r16,16
- .mem.offset 8,0; st8.spill [r3]=r17,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r18,16
- .mem.offset 8,0; st8.spill [r3]=r19,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r20,16
- .mem.offset 8,0; st8.spill [r3]=r21,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r22,16
- .mem.offset 8,0; st8.spill [r3]=r23,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r24,16
- .mem.offset 8,0; st8.spill [r3]=r25,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r26,16
- .mem.offset 8,0; st8.spill [r3]=r27,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r28,16
- .mem.offset 8,0; st8.spill [r3]=r29,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r30,16
- .mem.offset 8,0; st8.spill [r3]=r31,16 ;;
-#ifdef HANDLE_AR_UNAT
- // r16~r23 are preserved regs in bank0 regs, we need to restore them,
-	// r24~r31 are scratch regs, we don't need to handle the NaT bit,
-	// because the OS handler must assign it before accessing it
- ld8 r16=[r2],16
- ld8 r17=[r3],16;;
- ld8 r18=[r2],16
- ld8 r19=[r3],16;;
- ld8 r20=[r2],16
- ld8 r21=[r3],16;;
- ld8 r22=[r2],16
- ld8 r23=[r3],16;;
-#endif
- ;;
- bsw.0 ;;
- mov r24=ar.unat
- mov r2=r30
- mov r3=r29
-#ifdef HANDLE_AR_UNAT
- mov ar.unat=r28
-#endif
- ;;
- adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18
- adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r25]=r24
- st4 [r20]=r0
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(fast_reflect)
-
-// reflect access faults (0x2400,0x2800,0x5300) directly to domain
-// r16 == isr
-// r17 == ifa
-// r19 == reflect number (only pass-thru to dispatch_reflection)
-// r20 == offset into ivt
-// r31 == pr
-GLOBAL_ENTRY(fast_access_reflect)
-#ifndef FAST_ACCESS_REFLECT // see beginning of file
- br.spnt.few dispatch_reflection ;;
-#endif
- mov r30=cr.ipsr
- mov r29=cr.iip;;
- tbit.nz p7,p0=r30,IA64_PSR_PP_BIT
-(p7) br.spnt.few dispatch_reflection ;;
- extr.u r21=r30,IA64_PSR_CPL0_BIT,2 ;;
- cmp.eq p7,p0=r21,r0
-(p7) br.spnt.few dispatch_reflection ;;
- movl r18=THIS_CPU(current_psr_ic_addr);;
- ld8 r18=[r18];;
- ld4 r21=[r18];;
- cmp.eq p7,p0=r0,r21
-(p7) br.spnt.few dispatch_reflection ;;
- // set shared_mem ifa, FIXME: should we validate it?
- mov r17=cr.ifa
- adds r21=XSI_IFA_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r17 ;;
- // get rr[ifa] and save to itir in shared memory (extra bits ignored)
- shr.u r22=r17,61
- adds r23=XSI_ITIR_OFS-XSI_PSR_IC_OFS,r18
- adds r21=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18 ;;
- shladd r22=r22,3,r21;;
- ld8 r22=[r22];;
- and r22=~3,r22;;
- st8 [r23]=r22;;
- br.cond.sptk.many fast_reflect;;
-END(fast_access_reflect)
-
-// when we get to here, VHPT_CCHAIN_LOOKUP has failed and everything
-// is as it was at the time of original miss. We want to preserve that
-// so if we get a nested fault, we can just branch to page_fault
-GLOBAL_ENTRY(fast_tlb_miss_reflect)
-#ifndef FAST_TLB_MISS_REFLECT // see beginning of file
- br.spnt.few page_fault ;;
-#else
- mov r31=pr
- mov r30=cr.ipsr
- mov r29=cr.iip
- mov r16=cr.isr
- mov r17=cr.ifa;;
- // for now, always take slow path for region 0 (e.g. metaphys mode)
- extr.u r21=r17,61,3;;
- cmp.eq p7,p0=r0,r21
-(p7) br.spnt.few page_fault ;;
- // always take slow path for PL0 (e.g. __copy_from_user)
- extr.u r21=r30,IA64_PSR_CPL0_BIT,2 ;;
- cmp.eq p7,p0=r21,r0
-(p7) br.spnt.few page_fault ;;
- // slow path if strange ipsr or isr bits set
-	tbit.nz p7,p0=r30,IA64_PSR_PP_BIT
-(p7) br.spnt.few page_fault ;;
- movl r21=IA64_ISR_IR|IA64_ISR_SP|IA64_ISR_NA ;;
- and r21=r16,r21;;
- cmp.ne p7,p0=r0,r21
-(p7) br.spnt.few page_fault ;;
- // also take slow path if virtual psr.ic=0
- movl r18=XSI_PSR_IC;;
- ld4 r21=[r18];;
- cmp.eq p7,p0=r0,r21
-(p7) br.spnt.few page_fault ;;
- // OK, if we get to here, we are doing a fast vcpu_translate. Need to:
- // 1) look in the virtual TR's (pinned), if not there
- // 2) look in the 1-entry TLB (pinned), if not there
- // 3) check the domain VHPT (NOT pinned, accesses domain memory!)
- // If we find it in any of these places, we need to effectively do
- // a hyper_itc_i/d
-
- // short-term hack for now, if in region 5-7, take slow path
- // since all Linux TRs are in region 5 or 7, we need not check TRs
- extr.u r21=r17,61,3;;
- cmp.le p7,p0=5,r21
-(p7) br.spnt.few page_fault ;;
-fast_tlb_no_tr_match:
- movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r27=[r27]
- tbit.nz p6,p7=r16,IA64_ISR_X_BIT;;
-(p6) adds r25=IA64_VCPU_ITLB_OFFSET,r27
-(p7) adds r25=IA64_VCPU_DTLB_OFFSET,r27;;
- ld8 r20=[r25],8;;
- tbit.z p7,p0=r20,VTLB_PTE_P_BIT // present?
-(p7) br.cond.spnt.few 1f;;
- // if ifa is in range of tlb, don't bother to check rid, go slow path
- ld8 r21=[r25],8;;
- mov r23=1
- extr.u r21=r21,IA64_ITIR_PS,IA64_ITIR_PS_LEN;;
- shl r22=r23,r21
- ld8 r21=[r25],8;;
- cmp.ltu p7,p0=r17,r21
-(p7) br.cond.sptk.many 1f;
- add r21=r22,r21;;
- cmp.ltu p7,p0=r17,r21
-(p7) br.cond.spnt.few page_fault;;
-
-1: // check the guest VHPT
- adds r19 = XSI_PTA_OFS-XSI_PSR_IC_OFS, r18;;
- ld8 r19=[r19]
- // if (!rr.ve || !(pta & IA64_PTA_VE)) take slow way for now
- // FIXME: later, we deliver an alt_d/i vector after thash and itir
- extr.u r25=r17,61,3
- adds r21=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18 ;;
- shl r25=r25,3;;
- add r21=r21,r25;;
- ld8 r22=[r21];;
- tbit.z p7,p0=r22,0
-(p7) br.cond.spnt.few page_fault;;
- tbit.z p7,p0=r19,IA64_PTA_VE_BIT
-(p7) br.cond.spnt.few page_fault;;
- tbit.nz p7,p0=r19,IA64_PTA_VF_BIT // long format VHPT
-(p7) br.cond.spnt.few page_fault;;
-
- // compute and save away itir (r22 & RR_PS_MASK)
- movl r21=IA64_ITIR_PS_MASK;;
- and r22=r22,r21;;
- adds r21=XSI_ITIR_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r22;;
-
- // save away ifa
- adds r21=XSI_IFA_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r17;;
- // see vcpu_thash to save away iha
- shr.u r20 = r17, 61
- addl r25 = 1, r0
- movl r30 = 0xe000000000000000
- ;;
- and r21 = r30, r17 // VHPT_Addr1
- ;;
- shladd r28 = r20, 3, r18
- adds r19 = XSI_PTA_OFS-XSI_PSR_IC_OFS, r18
- ;;
- adds r27 = XSI_RR0_OFS-XSI_PSR_IC_OFS, r28
- addl r28 = 32767, r0
- ld8 r24 = [r19] // pta
- ;;
- ld8 r23 = [r27] // rrs[vadr>>61]
- extr.u r26 = r24, IA64_PTA_SIZE_BIT, IA64_PTA_SIZE_LEN
- ;;
- extr.u r22 = r23, IA64_RR_PS, IA64_RR_PS_LEN
- shl r30 = r25, r26 // pt size
- ;;
- shr.u r19 = r17, r22 // ifa pg number
- shr.u r29 = r24, IA64_PTA_BASE_BIT
- adds r30 = -1, r30 // pt size mask
- ;;
- shladd r27 = r19, 3, r0 // vhpt offset
- extr.u r26 = r30, 15, 46
- ;;
- andcm r24 = r29, r26
- and r19 = r28, r27
- shr.u r25 = r27, 15
- ;;
- and r23 = r26, r25
- ;;
- or r22 = r24, r23
- ;;
- dep.z r20 = r22, 15, 46
- ;;
- or r30 = r20, r21
- ;;
- //or r8 = r19, r30
- or r19 = r19, r30
- ;;
- adds r23=XSI_IHA_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r23]=r19
- // done with thash, check guest VHPT
-
- adds r20 = XSI_PTA_OFS-XSI_PSR_IC_OFS, r18;;
- ld8 r24 = [r20];; // pta
- // avoid recursively walking the VHPT
- // if (((r17=address ^ r24=pta) & ((itir_mask(pta) << 3) >> 3)) != 0) {
- mov r20=-8
- xor r21=r17,r24
- extr.u r24=r24,IA64_PTA_SIZE_BIT,IA64_PTA_SIZE_LEN;;
- shl r20=r20,r24;;
- shr.u r20=r20,3;;
- and r21=r20,r21;;
- cmp.eq p7,p0=r21,r0
-(p7) br.cond.spnt.few 1f;;
- // __copy_from_user(&pte, r19=(void *)(*iha), sizeof(pte)=8)
- // prepare for possible nested dtlb fault
- mov r29=b0
- movl r30=guest_vhpt_miss
- // now go fetch the entry from the guest VHPT
- ld8 r20=[r19];;
- // if we wind up here, we successfully loaded the VHPT entry
-
- // this VHPT walker aborts on non-present pages instead
- // of inserting a not-present translation, this allows
- // vectoring directly to the miss handler
- tbit.z p7,p0=r20,0
-(p7) br.cond.spnt.few page_not_present;;
-
-#ifdef FAST_REFLECT_CNT
- movl r21=PERFC(fast_vhpt_translate);;
- ld4 r22=[r21];;
- adds r22=1,r22;;
- st4 [r21]=r22;;
-#endif
-
-// prepare for fast_insert(PSCB(ifa),PSCB(itir),r16=pte)
-// r16 == pte
-// r17 == bit0: 1=inst, 0=data; bit1: 1=itc, 0=vcpu_translate
-// r18 == XSI_PSR_IC_OFS
-// r24 == ps
-// r29 == saved value of b0 in case of recovery
-// r30 == recovery ip if failure occurs
-// r31 == pr
- tbit.nz p6,p7=r16,IA64_ISR_X_BIT;;
-(p6) mov r17=1
-(p7) mov r17=0
- mov r16=r20
- mov r29=b0
- movl r30=recover_and_page_fault
- adds r21=XSI_ITIR_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r24=[r21];;
- extr.u r24=r24,IA64_ITIR_PS,IA64_ITIR_PS_LEN
- // IFA already in PSCB
- br.cond.sptk.many fast_insert;;
-END(fast_tlb_miss_reflect)
-
-// we get here if fast_insert fails (e.g. due to metaphysical lookup)
-ENTRY(recover_and_page_fault)
-#ifdef PERF_COUNTERS
- movl r21=PERFC(recover_to_page_fault);;
- ld4 r22=[r21];;
- adds r22=1,r22;;
- st4 [r21]=r22;;
-#endif
- mov b0=r29
- br.cond.sptk.many page_fault;;
-
-// if we wind up here, we missed in guest VHPT so recover
-// from nested dtlb fault and reflect a tlb fault to the guest
-guest_vhpt_miss:
- mov b0=r29
- // fault = IA64_VHPT_FAULT
- mov r20=r0
- br.cond.sptk.many 1f;
-
- // if we get to here, we are ready to reflect
-	// need to set up virtual ifa, iha, itir (fast_reflect handles
-	// virtual isr, iip, ipsr, ifs)
- // see vcpu_get_itir_on_fault: get ps,rid,(FIXME key) from rr[ifa]
-page_not_present:
- tbit.nz p6,p7=r16,IA64_ISR_X_BIT;;
-(p6) movl r20=0x400
-(p7) movl r20=0x800
-
-1: extr.u r25=r17,61,3;;
- adds r21=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18
- shl r25=r25,3;;
- add r21=r21,r25;;
- ld8 r22=[r21];;
- extr.u r22=r22,IA64_RR_PS,IA64_RR_PS_LEN+IA64_RR_RID_LEN;;
- dep.z r22=r22,IA64_RR_PS,IA64_RR_PS_LEN+IA64_RR_RID_LEN
- adds r23=XSI_ITIR_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r23]=r22
-
- // fast reflect expects
- // r16 == cr.isr
- // r18 == XSI_PSR_IC
- // r20 == offset into ivt
- // r29 == iip
- // r30 == ipsr
- // r31 == pr
- //mov r16=cr.isr
- mov r29=cr.iip
- mov r30=cr.ipsr
- br.sptk.many fast_reflect;;
-#endif
-END(fast_tlb_miss_reflect)
-
-ENTRY(slow_vcpu_rfi)
- adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18;;
- ld8 r22=[r22];;
- tbit.z p6,p0=r22,63
-(p6) br.spnt.few dispatch_break_fault ;;
- // If vifs.v is set, we have two IFS to consider:
- // * the guest IFS
- // * the hypervisor IFS (validated by cover)
- // Because IFS is copied to CFM and is used to adjust AR.BSP,
- // virtualization of rfi is not easy.
-	// Previously there was a two-step method (a first rfi jumped to
-	// a stub which performed a new rfi).
-	// This new method discards the RS before executing the hypervisor
-	// cover.  After cover, IFS.IFM will be zero.  This IFS simply
-	// clears CFM without modifying AR.BSP.  Therefore the guest IFS can
-	// be used instead and there is no need for a second rfi.
-	// Discarding the RS with the following alloc instruction just clears
-	// CFM, which is safe because rfi will overwrite it.
-	// There is a drawback: because the RS must be discarded before
-	// executing C code, emulation of rfi must go through a hyperprivop
-	// and not through normal instruction decoding.
- alloc r22=ar.pfs,0,0,0,0
- br.spnt.few dispatch_break_fault
- ;;
-END(slow_vcpu_rfi)
-
-// ensure that, if giving up, registers at entry to fast_hyperprivop remain unchanged
-ENTRY(hyper_rfi)
-#ifndef FAST_RFI
- br.spnt.few slow_vcpu_rfi ;;
-#endif
- // if interrupts pending and vcr.ipsr.i=1, do it the slow way
- adds r19=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
- adds r23=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18
- cmp.ne p8,p0=r20,r0;; // evtchn_upcall_pending != 0
- // if (!(vpsr.dt && vpsr.rt && vpsr.it)), do it the slow way
- ld8 r21=[r19],XSI_IIP_OFS-XSI_IPSR_OFS // r21=vcr.ipsr
- movl r20=~(IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT);;
- or r20=r20,r21
- // p8 determines whether we might deliver an immediate extint
-(p8) tbit.nz p8,p0=r21,IA64_PSR_I_BIT;;
- cmp.ne p7,p0=-1,r20
- ld4 r23=[r23] // r23=metaphysical_mode
-#ifndef RFI_TO_INTERRUPT // see beginning of file
-(p8) br.cond.spnt.few slow_vcpu_rfi
-#endif
-(p7) br.spnt.few slow_vcpu_rfi;;
-	// if we were in metaphys mode, do it the slow way (FIXME later?)
- cmp.ne p7,p0=r23,r0
- ld8 r22=[r19] // r22=vcr.iip
-(p7) br.spnt.few slow_vcpu_rfi;;
- // OK now, let's do an rfi.
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_RFI);;
- ld4 r23=[r20];;
- adds r23=1,r23;;
- st4 [r20]=r23;;
-#endif
-#ifdef RFI_TO_INTERRUPT
- // maybe do an immediate interrupt delivery?
-(p8) br.cond.spnt.few rfi_check_extint;;
-#endif
-
-just_do_rfi:
- // r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
- mov cr.iip=r22
- extr.u r19=r21,IA64_PSR_CPL0_BIT,2
- adds r20=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- cmp.gtu p7,p0=CONFIG_CPL0_EMUL,r19
- ld8 r20=[r20];;
-(p7) mov r19=CONFIG_CPL0_EMUL
- dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
- mov cr.ifs=r20 ;;
- // ipsr.cpl = max(vcr.ipsr.cpl, IA64_PSR_CPL0_BIT);
- movl r20=THIS_CPU(current_psr_i_addr)
- dep r21=r19,r21,IA64_PSR_CPL0_BIT,2;;
- // vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
- ld8 r20=[r20]
- mov r19=1
- tbit.nz p7,p6=r21,IA64_PSR_I_BIT
- tbit.nz p9,p8=r21,IA64_PSR_IC_BIT;;
- // not done yet
-(p7) st1 [r20]=r0
-(p6) st1 [r20]=r19
-(p9) st4 [r18]=r19
-(p8) st4 [r18]=r0
- // force on psr.ic, i, dt, rt, it, bn
- movl r20=(IA64_PSR_I|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT| \
- IA64_PSR_IT|IA64_PSR_BN)
- // keep cr.ipsr.pp and set vPSR.pp = vIPSR.pp
- mov r22=cr.ipsr
- ;;
- or r21=r21,r20
- tbit.z p10,p11 = r22, IA64_PSR_PP_BIT
- ;;
- adds r20=XSI_VPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
- tbit.z p8,p9 = r21, IA64_PSR_DFH_BIT
- adds r23=XSI_VPSR_PP_OFS-XSI_PSR_IC_OFS,r18
- ;;
- (p9) mov r27=1;;
- (p9) st1 [r20]=r27
- dep r21=r22,r21,IA64_PSR_PP_BIT,1
- (p10) st1 [r23]=r0
- (p11) st1 [r23]=r27
- ;;
- (p8) st1 [r20]=r0
- (p8) adds r20=XSI_HPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
- (p8) ld1 r27=[r20]
- ;;
- (p8) dep r21=r27,r21, IA64_PSR_DFH_BIT, 1
- ;;
- mov cr.ipsr=r21
- adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld4 r21=[r20];;
- cmp.ne p7,p0=r21,r0 // domain already did "bank 1 switch?"
-(p7) br.cond.spnt.few 1f;
- // OK, now all set to go except for switch to virtual bank1
- mov r22=1;;
- st4 [r20]=r22
- mov r30=r2
- mov r29=r3
- mov r17=ar.unat;;
- adds r16=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18
- adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18
- adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18;;
- ld8 r16=[r16];;
- mov ar.unat=r16;;
- bsw.1;;
- // FIXME?: ar.unat is not really handled correctly,
- // but may not matter if the OS is NaT-clean
- .mem.offset 0,0; ld8.fill r16=[r2],16
- .mem.offset 8,0; ld8.fill r17=[r3],16 ;;
- .mem.offset 0,0; ld8.fill r18=[r2],16
- .mem.offset 0,0; ld8.fill r19=[r3],16 ;;
- .mem.offset 8,0; ld8.fill r20=[r2],16
- .mem.offset 8,0; ld8.fill r21=[r3],16 ;;
- .mem.offset 8,0; ld8.fill r22=[r2],16
- .mem.offset 8,0; ld8.fill r23=[r3],16 ;;
- .mem.offset 8,0; ld8.fill r24=[r2],16
- .mem.offset 8,0; ld8.fill r25=[r3],16 ;;
- .mem.offset 8,0; ld8.fill r26=[r2],16
- .mem.offset 8,0; ld8.fill r27=[r3],16 ;;
- .mem.offset 8,0; ld8.fill r28=[r2],16
- .mem.offset 8,0; ld8.fill r29=[r3],16 ;;
- .mem.offset 8,0; ld8.fill r30=[r2],16
- .mem.offset 8,0; ld8.fill r31=[r3],16 ;;
- bsw.0 ;;
- mov ar.unat=r17
- mov r2=r30
- mov r3=r29
-1: mov pr=r31,-1
- ;;
- rfi
- ;;
-END(hyper_rfi)
-
-#ifdef RFI_TO_INTERRUPT
-ENTRY(rfi_check_extint)
- //br.sptk.many dispatch_break_fault ;;
-
- // r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
- // make sure none of these get trashed in case going to just_do_rfi
- movl r30=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r30=[r30];;
- adds r24=IA64_VCPU_INSVC3_OFFSET,r30
- mov r25=192
- adds r16=IA64_VCPU_IRR3_OFFSET,r30;;
- ld8 r23=[r16];;
- cmp.eq p6,p0=r23,r0;;
-(p6) adds r16=-8,r16;;
-(p6) adds r24=-8,r24;;
-(p6) adds r25=-64,r25;;
-(p6) ld8 r23=[r16];;
-(p6) cmp.eq p6,p0=r23,r0;;
-(p6) adds r16=-8,r16;;
-(p6) adds r24=-8,r24;;
-(p6) adds r25=-64,r25;;
-(p6) ld8 r23=[r16];;
-(p6) cmp.eq p6,p0=r23,r0;;
-(p6) adds r16=-8,r16;;
-(p6) adds r24=-8,r24;;
-(p6) adds r25=-64,r25;;
-(p6) ld8 r23=[r16];;
- cmp.eq p6,p0=r23,r0
-(p6) br.cond.spnt.few just_do_rfi; // this is actually an error
- // r16 points to non-zero element of irr, r23 has value
- // r24 points to corr element of insvc, r25 has elt*64
- ld8 r26=[r24];;
- cmp.geu p6,p0=r26,r23
-(p6) br.cond.spnt.many just_do_rfi;
-
- // not masked by insvc, get vector number
- shr.u r26=r23,1;;
- or r26=r23,r26;;
- shr.u r27=r26,2;;
- or r26=r26,r27;;
- shr.u r27=r26,4;;
- or r26=r26,r27;;
- shr.u r27=r26,8;;
- or r26=r26,r27;;
- shr.u r27=r26,16;;
- or r26=r26,r27;;
- shr.u r27=r26,32;;
- or r26=r26,r27;;
- andcm r26=0xffffffffffffffff,r26;;
- popcnt r26=r26;;
- sub r26=63,r26;;
- // r26 now contains the bit index (mod 64)
- mov r27=1;;
- shl r27=r27,r26;;
- // r27 now contains the (within the proper word) bit mask
- add r26=r25,r26
- // r26 now contains the vector [0..255]
- adds r20=XSI_TPR_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r20=[r20] ;;
- extr.u r29=r20,4,4
- tbit.nz p6,p0=r20,16 // if tpr.mmi is set, just rfi
-(p6) br.cond.spnt.few just_do_rfi;;
- shl r29=r29,4;;
- adds r29=15,r29;;
- cmp.ge p6,p0=r29,r26 // if tpr masks interrupt, just rfi
-(p6) br.cond.spnt.few just_do_rfi;;
-END(rfi_check_extint)
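-
-// The smear-and-popcnt sequence above is a branch-free highest-set-bit
-// scan.  A C sketch of the same trick (hypothetical helper; popcount()
-// stands in for the popcnt instruction, and x must be nonzero):
-//
-//	static int highest_set_bit(unsigned long x)
-//	{
-//		x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
-//		x |= x >> 8;  x |= x >> 16; x |= x >> 32;
-//		/* all bits at and below the MSB are now set, so the
-//		 * complement has exactly 63 - index bits set */
-//		return 63 - popcount(~x);
-//	}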
-
-// this doesn't work yet (dies early after getting to user mode)
-// but happens relatively infrequently, so fix it later.
-// NOTE that these will be counted incorrectly for now (for privcnt output)
-ENTRY(rfi_with_interrupt)
-#if 1
- br.sptk.many dispatch_break_fault ;;
-#endif
-
- // OK, have an unmasked vector, so deliver extint to vcr.iva+0x3000
- // r18 == XSI_PSR_IC
- // r21 == vipsr (ipsr in shared_mem)
- // r30 == IA64_KR(CURRENT)
- // r31 == pr
- mov r17=cr.ipsr
- mov r16=cr.isr;;
- // set shared_mem isr
- extr.u r16=r16,IA64_ISR_IR_BIT,1;; // grab cr.isr.ir bit
- dep r16=r16,r0,IA64_ISR_IR_BIT,1 // insert into cr.isr (rest of bits zero)
- extr.u r20=r21,IA64_PSR_RI_BIT,2 ;; // get v(!)psr.ri
- dep r16=r20,r16,IA64_PSR_RI_BIT,2 ;; // deposit cr.isr.ei
- adds r22=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r22]=r16;;
- movl r22=THIS_CPU(current_psr_i_addr)
- // set cr.ipsr (make sure cpl==2!)
- mov r29=r17
- movl r27=~DELIVER_PSR_CLR
- movl r28=DELIVER_PSR_SET | (CONFIG_CPL0_EMUL << IA64_PSR_CPL0_BIT)
- mov r20=1;;
- ld8 r22=[r22]
- and r29=r29,r27;;
- or r29=r29,r28;;
- mov cr.ipsr=r29
- // v.ipsr and v.iip are already set (and v.iip validated) as rfi target
- // set shared_mem interrupt_delivery_enabled to 0
- // set shared_mem interrupt_collection_enabled to 0
- st1 [r22]=r20
- st4 [r18]=r0;;
- // cover and set shared_mem precover_ifs to cr.ifs
- // set shared_mem ifs to 0
-#if 0
- cover ;;
- mov r20=cr.ifs
- adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r22]=r0 ;;
- adds r22=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r22]=r20 ;;
- // leave cr.ifs alone for later rfi
-#else
- adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r20=[r22];;
- st8 [r22]=r0 ;;
- adds r22=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r22]=r20 ;;
-#endif
- // set iip to go to domain IVA break instruction vector
- adds r22=IA64_VCPU_IVA_OFFSET,r30;;
- ld8 r23=[r22]
- movl r24=0x3000;;
- add r24=r24,r23;;
- mov cr.iip=r24;;
-#if 0
- // OK, now all set to go except for switch to virtual bank0
- mov r30=r2
- mov r29=r3;;
- adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18
- adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
- bsw.1;;
- // FIXME: need to handle ar.unat!
- .mem.offset 0,0; st8.spill [r2]=r16,16
- .mem.offset 8,0; st8.spill [r3]=r17,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r18,16
- .mem.offset 8,0; st8.spill [r3]=r19,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r20,16
- .mem.offset 8,0; st8.spill [r3]=r21,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r22,16
- .mem.offset 8,0; st8.spill [r3]=r23,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r24,16
- .mem.offset 8,0; st8.spill [r3]=r25,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r26,16
- .mem.offset 8,0; st8.spill [r3]=r27,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r28,16
- .mem.offset 8,0; st8.spill [r3]=r29,16 ;;
- .mem.offset 0,0; st8.spill [r2]=r30,16
- .mem.offset 8,0; st8.spill [r3]=r31,16 ;;
- bsw.0 ;;
- mov r2=r30
- mov r3=r29;;
-#endif
- adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
- st4 [r20]=r0
- mov pr=r31,-1 ;;
- rfi
-END(rfi_with_interrupt)
-#endif // RFI_TO_INTERRUPT
-
-ENTRY(hyper_cover)
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_COVER);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- mov r24=cr.ipsr
- mov r25=cr.iip;;
-	// skip the test for vpsr.ic... it's a prerequisite for hyperprivops
- cover ;;
- mov r30=cr.ifs
- adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18;;
- st8 [r22]=r30
- mov cr.ifs=r0
- // adjust return address to skip over break instruction
- extr.u r26=r24,41,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r25=16,r25
-(p7) adds r26=1,r26
- ;;
- dep r24=r26,r24,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r24
- mov cr.iip=r25
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_cover)
-
-// return from metaphysical mode (meta=1) to virtual mode (meta=0)
-ENTRY(hyper_ssm_dt)
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SSM_DT);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- mov r24=cr.ipsr
- mov r25=cr.iip
- adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld4 r21=[r20];;
- cmp.eq p7,p0=r21,r0 // meta==0?
-(p7) br.spnt.many 1f ;; // already in virtual mode
- movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r22=[r22];;
- adds r22=IA64_VCPU_META_SAVED_RR0_OFFSET,r22;;
- ld8 r23=[r22];;
- mov rr[r0]=r23;;
- srlz.i;;
- st4 [r20]=r0
- // adjust return address to skip over break instruction
-1: extr.u r26=r24,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r25=16,r25
-(p7) adds r26=1,r26
- ;;
- dep r24=r26,r24,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r24
- mov cr.iip=r25
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_ssm_dt)
-
-// go to metaphysical mode (meta=1) from virtual mode (meta=0)
-ENTRY(hyper_rsm_dt)
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_RSM_DT);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- mov r24=cr.ipsr
- mov r25=cr.iip
- adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld4 r21=[r20];;
- cmp.ne p7,p0=r21,r0 // meta==0?
-(p7) br.spnt.many 1f ;; // already in metaphysical mode
- movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r22=[r22];;
- adds r22=IA64_VCPU_META_RID_DT_OFFSET,r22;;
- ld8 r23=[r22];;
- mov rr[r0]=r23;;
- srlz.i;;
- adds r21=1,r0 ;;
- st4 [r20]=r21
- // adjust return address to skip over break instruction
-1: extr.u r26=r24,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r25=16,r25
-(p7) adds r26=1,r26
- ;;
- dep r24=r26,r24,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r24
- mov cr.iip=r25
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_rsm_dt)
-
-ENTRY(hyper_set_itm)
-	// when we get here, r20 != 0 means the guest has interrupts pending
- cmp.ne p7,p0=r20,r0
-(p7) br.spnt.many dispatch_break_fault ;;
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_ITM);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- movl r20=THIS_CPU(cpu_info)+IA64_CPUINFO_ITM_NEXT_OFFSET;;
- ld8 r21=[r20];;
- movl r20=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r20=[r20];;
- adds r20=IA64_VCPU_DOMAIN_ITM_OFFSET,r20;;
- st8 [r20]=r8
- cmp.geu p6,p0=r21,r8;;
-(p6) mov r21=r8
- // now "safe set" cr.itm=r21
- mov r23=100;;
-2: mov cr.itm=r21;;
- srlz.d;;
- mov r22=ar.itc ;;
- cmp.leu p6,p0=r21,r22;;
- add r21=r21,r23;;
- shl r23=r23,1
-(p6) br.cond.spnt.few 2b;;
-1: mov r24=cr.ipsr
- mov r25=cr.iip;;
- extr.u r26=r24,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r25=16,r25
-(p7) adds r26=1,r26
- ;;
- dep r24=r26,r24,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r24
- mov cr.iip=r25
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_set_itm)
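-
-// The "safe set" loop above, as a C sketch (write_itm()/read_itc() are
-// hypothetical stand-ins for the cr.itm/ar.itc accesses): program the
-// match register, then verify the deadline is still in the future; if
-// the counter has already passed it, push the deadline out by an
-// exponentially growing margin and retry.
-//
-//	unsigned long margin = 100;
-//	for (;;) {
-//		write_itm(deadline);
-//		if (read_itc() < deadline)
-//			break;		/* deadline still ahead of us */
-//		deadline += margin;
-//		margin <<= 1;
-//	}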
-
-ENTRY(hyper_get_psr)
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_PSR);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- mov r24=cr.ipsr
- movl r8=0xffffffff | IA64_PSR_MC | IA64_PSR_IT;;
- // only return PSR{36:35,31:0}
- and r8=r8,r24
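- // (overlay ic/pp/dt/i/dfh below from the vcpu's virtual psr state)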
- // get vpsr.ic
- ld4 r21=[r18];;
- dep r8=r21,r8,IA64_PSR_IC_BIT,1
- // get vpsr.pp
- adds r20=XSI_VPSR_PP_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld1 r21=[r20];;
- dep r8=r21,r8,IA64_PSR_PP_BIT,1
- // get vpsr.dt
- adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld4 r21=[r20];;
- cmp.ne p6,p0=r21,r0
- ;;
-(p6) dep r8=0,r8,IA64_PSR_DT_BIT,1 // metaphysical => vpsr.dt is 0
- // get vpsr.i
- adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r20=[r20];;
- ld1 r21=[r20];;
- cmp.eq p8,p9=r0,r21
- ;;
-(p8) dep r8=-1,r8,IA64_PSR_I_BIT,1
-(p9) dep r8=0,r8,IA64_PSR_I_BIT,1
- // get vpsr.dfh
- adds r20=XSI_VPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
- ld1 r21=[r20];;
- dep r8=r21,r8,IA64_PSR_DFH_BIT,1
- ;;
- mov r25=cr.iip
- extr.u r26=r24,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r25=16,r25
-(p7) adds r26=1,r26
- ;;
- dep r24=r26,r24,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r24
- mov cr.iip=r25
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_get_psr)
-
-
-ENTRY(hyper_get_rr)
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_RR);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- extr.u r25=r8,61,3;;
- adds r20=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18
- shl r25=r25,3;;
- add r20=r20,r25;;
- ld8 r8=[r20]
-1: mov r24=cr.ipsr
- mov r25=cr.iip;;
- extr.u r26=r24,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r25=16,r25
-(p7) adds r26=1,r26
- ;;
- dep r24=r26,r24,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r24
- mov cr.iip=r25
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_get_rr)
-
-ENTRY(hyper_set_rr)
- extr.u r25=r8,61,3;;
- cmp.leu p7,p0=7,r25 // punt on setting rr7
-(p7) br.spnt.many dispatch_break_fault ;;
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_RR);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- extr.u r26=r9,IA64_RR_RID,IA64_RR_RID_LEN // r26 = r9.rid
- movl r20=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r20=[r20];;
- adds r22=IA64_VCPU_STARTING_RID_OFFSET,r20
- adds r23=IA64_VCPU_ENDING_RID_OFFSET,r20
- adds r24=IA64_VCPU_META_SAVED_RR0_OFFSET,r20
- adds r21=IA64_VCPU_VHPT_PG_SHIFT_OFFSET,r20;;
- ld4 r22=[r22]
- ld4 r23=[r23]
- ld1 r21=[r21];;
- add r22=r26,r22;;
- cmp.geu p6,p0=r22,r23 // if r9.rid + starting_rid >= ending_rid
-(p6) br.cond.spnt.few 1f; // this is an error, but just ignore/return
- adds r20=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18
- shl r25=r25,3;;
- add r20=r20,r25;;
- st8 [r20]=r9;; // store away exactly what was passed
- // but adjust value actually placed in rr[r8]
- // r22 contains adjusted rid, "mangle" it (see regionreg.c)
- // and set ps to v->arch.vhpt_pg_shift and ve to 1
- extr.u r27=r22,0,8
- extr.u r28=r22,8,8
- extr.u r29=r22,16,8
- dep.z r23=r21,IA64_RR_PS,IA64_RR_PS_LEN;;
- dep r23=-1,r23,0,1;; // mangling is swapping bytes 1 & 3
- dep r23=r27,r23,24,8;;
- dep r23=r28,r23,16,8;;
- dep r23=r29,r23,8,8
- cmp.eq p6,p0=r25,r0;; // if rr0, save for metaphysical
-(p6) st8 [r24]=r23
- mov rr[r8]=r23;;
- // done, mosey on back
-1: mov r24=cr.ipsr
- mov r25=cr.iip;;
- extr.u r26=r24,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r25=16,r25
-(p7) adds r26=1,r26
- ;;
- dep r24=r26,r24,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r24
- mov cr.iip=r25
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_set_rr)
-
-// r8 = val0
-// r9 = val1
-// r10 = val2
-// r11 = val3
-// r14 = val4
-// mov rr[0x0000000000000000UL] = r8
-// mov rr[0x2000000000000000UL] = r9
-// mov rr[0x4000000000000000UL] = r10
-// mov rr[0x6000000000000000UL] = r11
-// mov rr[0x8000000000000000UL] = r14
-ENTRY(hyper_set_rr0_to_rr4)
-#ifndef FAST_SET_RR0_TO_RR4
- br.spnt.few dispatch_break_fault ;;
-#endif
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_RR0_TO_RR4);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- movl r17=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r17=[r17];;
-
- adds r21=IA64_VCPU_STARTING_RID_OFFSET,r17
- adds r22=IA64_VCPU_ENDING_RID_OFFSET,r17
- adds r23=IA64_VCPU_VHPT_PG_SHIFT_OFFSET,r17
- ;;
- ld4 r21=[r21] // r21 = current->starting_rid
- extr.u r26=r8,IA64_RR_RID,IA64_RR_RID_LEN // r26 = r8.rid
- extr.u r27=r9,IA64_RR_RID,IA64_RR_RID_LEN // r27 = r9.rid
- ld4 r22=[r22] // r22 = current->ending_rid
- extr.u r28=r10,IA64_RR_RID,IA64_RR_RID_LEN // r28 = r10.rid
- extr.u r29=r11,IA64_RR_RID,IA64_RR_RID_LEN // r29 = r11.rid
- adds r24=IA64_VCPU_META_SAVED_RR0_OFFSET,r17
- extr.u r30=r14,IA64_RR_RID,IA64_RR_RID_LEN // r30 = r14.rid
- ld1 r23=[r23] // r23 = current->vhpt_pg_shift
- ;;
- add r16=r26,r21
- add r17=r27,r21
- add r19=r28,r21
- add r20=r29,r21
- add r21=r30,r21
- dep.z r23=r23,IA64_RR_PS,IA64_RR_PS_LEN // r23 = rr.ps
- ;;
- cmp.geu p6,p0=r16,r22 // if r8.rid + starting_rid >= ending_rid
- cmp.geu p7,p0=r17,r22 // if r9.rid + starting_rid >= ending_rid
- cmp.geu p8,p0=r19,r22 // if r10.rid + starting_rid >= ending_rid
-(p6) br.cond.spnt.few 1f // this is an error, but just ignore/return
-(p7) br.cond.spnt.few 1f // this is an error, but just ignore/return
- cmp.geu p9,p0=r20,r22 // if r11.rid + starting_rid >= ending_rid
-(p8) br.cond.spnt.few 1f // this is an error, but just ignore/return
-(p9) br.cond.spnt.few 1f // this is an error, but just ignore/return
- cmp.geu p10,p0=r21,r22 // if r14.rid + starting_rid >= ending_rid
-(p10) br.cond.spnt.few 1f // this is an error, but just ignore/return
- dep r23=-1,r23,0,1 // add rr.ve
- ;;
- mov r25=1
- adds r22=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18
- ;;
- shl r30=r25,61 // r30 = 0x2000000000000000
-
-#if 0
- // simple plain version
- // rr0
- st8 [r22]=r8, 8 // current->rrs[0] = r8
-
- mov r26=0 // r26=0x0000000000000000
- extr.u r27=r16,0,8
- extr.u r28=r16,8,8
- extr.u r29=r16,16,8;;
- dep r25=r27,r23,24,8;; // mangling is swapping bytes 1 & 3
- dep r25=r28,r25,16,8;;
- dep r25=r29,r25,8,8;;
- st8 [r24]=r25 // save for metaphysical
- mov rr[r26]=r25
- dv_serialize_data
-
- // rr1
- st8 [r22]=r9, 8 // current->rrs[1] = r9
- add r26=r26,r30 // r26 = 0x2000000000000000
- extr.u r27=r17,0,8
- extr.u r28=r17,8,8
- extr.u r29=r17,16,8;;
- dep r25=r27,r23,24,8;; // mangling is swapping bytes 1 & 3
- dep r25=r28,r25,16,8;;
- dep r25=r29,r25,8,8;;
- mov rr[r26]=r25
- dv_serialize_data
-
- // rr2
- st8 [r22]=r10, 8 // current->rrs[2] = r10
- add r26=r26,r30 // r26 = 0x4000000000000000
- extr.u r27=r19,0,8
- extr.u r28=r19,8,8
- extr.u r29=r19,16,8;;
- dep r25=r27,r23,24,8;; // mangling is swapping bytes 1 & 3
- dep r25=r28,r25,16,8;;
- dep r25=r29,r25,8,8;;
- mov rr[r26]=r25
- dv_serialize_data
-
- // rr3
- st8 [r22]=r11, 8 // current->rrs[3] = r11
-
- add r26=r26,r30 // r26 = 0x6000000000000000
- extr.u r27=r20,0,8
- extr.u r28=r20,8,8
- extr.u r29=r20,16,8;;
- dep r25=r27,r23,24,8;; // mangling is swapping bytes 1 & 3
- dep r25=r28,r25,16,8;;
- dep r25=r29,r25,8,8;;
- mov rr[r26]=r25
- dv_serialize_data
-
- // rr4
- st8 [r22]=r14 // current->rrs[4] = r14
-
- add r26=r26,r30 // r26 = 0x8000000000000000
- extr.u r27=r21,0,8
- extr.u r28=r21,8,8
- extr.u r29=r21,16,8;;
- dep r25=r27,r23,24,8;; // mangling is swapping bytes 1 & 3
- dep r25=r28,r25,16,8;;
- dep r25=r29,r25,8,8;;
- mov rr[r26]=r25
- dv_serialize_data
-#else
- // shuffled version
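- // (same stores and mangling as the #if 0 version above, but
- //  interleaved so the extr/dep chains of successive rrs overlap)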
- // rr0
- // uses r27, r28, r29 for mangling
- // r25 for mangled value
- st8 [r22]=r8, 8 // current->rrs[0] = r8
- mov r26=0 // r26=0x0000000000000000
- extr.u r27=r16,0,8
- extr.u r28=r16,8,8
- extr.u r29=r16,16,8;;
- dep r25=r27,r23,24,8;; // mangling is swapping bytes 1 & 3
- dep r25=r28,r25,16,8;;
- dep r25=r29,r25,8,8;;
- st8 [r24]=r25 // save for metaphysical
- mov rr[r26]=r25
- dv_serialize_data
-
- // r16, r24, r25 are usable.
- // rr1
- // uses r25, r28, r29 for mangling
- // r25 for mangled value
- extr.u r25=r17,0,8
- extr.u r28=r17,8,8
- st8 [r22]=r9, 8 // current->rrs[1] = r9
- extr.u r29=r17,16,8 ;;
- add r26=r26,r30 // r26 = 0x2000000000000000
- extr.u r24=r19,8,8
- extr.u r16=r19,0,8
- dep r25=r25,r23,24,8;; // mangling is swapping bytes 1 & 3
- dep r25=r28,r25,16,8;;
- dep r25=r29,r25,8,8;;
- mov rr[r26]=r25
- dv_serialize_data
-
- // r16, r17, r24, r25 are usable
- // rr2
- // uses r16, r24, r29 for mangling
- // r17 for mangled value
- extr.u r29=r19,16,8
- extr.u r27=r20,0,8
- st8 [r22]=r10, 8 // current->rrs[2] = r10
- add r26=r26,r30 // r26 = 0x4000000000000000
- dep r17=r16,r23,24,8;; // mangling is swapping bytes 1 & 3
- dep r17=r24,r17,16,8;;
- dep r17=r29,r17,8,8;;
- mov rr[r26]=r17
- dv_serialize_data
-
- // r16, r17, r19, r24, r25 are usable
- // rr3
- // uses r27, r28, r29 for mangling
- // r25 for mangled value
- extr.u r28=r20,8,8
- extr.u r29=r20,16,8
- st8 [r22]=r11, 8 // current->rrs[3] = r11
- extr.u r16=r21,0,8
- add r26=r26,r30 // r26 = 0x6000000000000000
- dep r25=r27,r23,24,8;; // mangling is swapping bytes 1 & 3
- dep r25=r28,r25,16,8;;
- dep r25=r29,r25,8,8;;
- mov rr[r26]=r25
- dv_serialize_data
-
- // r16, r17, r19, r20, r24, r25 are usable
- // rr4
- // uses r16, r17, r24 for mangling
- // r25 for mangled value
- extr.u r17=r21,8,8
- extr.u r24=r21,16,8
- st8 [r22]=r14 // current->rrs[4] = r14
- add r26=r26,r30 // r26 = 0x8000000000000000
- dep r25=r16,r23,24,8;; // mangling is swapping bytes 1 & 3
- dep r25=r17,r25,16,8;;
- dep r25=r24,r25,8,8;;
- mov rr[r26]=r25
- dv_serialize_data
-#endif
-
- // done, mosey on back
-1: mov r24=cr.ipsr
- mov r25=cr.iip;;
- extr.u r26=r24,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r25=16,r25
-(p7) adds r26=1,r26
- ;;
- dep r24=r26,r24,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r24
- mov cr.iip=r25
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_set_rr0_to_rr4)
-
-ENTRY(hyper_set_kr)
- extr.u r25=r8,3,61;;
- cmp.ne p7,p0=r0,r25 // if kr# > 7, go slow way
-(p7) br.spnt.many dispatch_break_fault ;;
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_KR);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- adds r21=XSI_KR0_OFS-XSI_PSR_IC_OFS,r18
- shl r20=r8,3;;
- add r22=r20,r21;;
- st8 [r22]=r9;;
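- // ar.k0..ar.k7 cannot be indexed by a register, so test r8 against
- // each index in turn (decrementing toward zero) to pick the mov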
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar0=r9;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar1=r9;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar2=r9;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar3=r9;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar4=r9;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar5=r9;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar6=r9;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar7=r9;;
- // done, mosey on back
-1: mov r24=cr.ipsr
- mov r25=cr.iip;;
- extr.u r26=r24,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r25=16,r25
-(p7) adds r26=1,r26
- ;;
- dep r24=r26,r24,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r24
- mov cr.iip=r25
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_set_kr)
-
- // this routine was derived from optimized assembly output from
- // vcpu_thash, so it is dense and difficult to read, but it works
-// On entry:
-// r18 == XSI_PSR_IC
-// r31 == pr
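- //
- // roughly, in C (a sketch of what the hashing below computes):
- //   off  = (vadr >> rr[vadr>>61].ps) << 3;
- //   mask = ((1UL << pta.size) - 1) >> 15;
- //   r8   = (vadr & 0xe000000000000000)	// VHPT_Addr1 region bits
- //        | ((((pta >> 15) & ~mask) | ((off >> 15) & mask)) << 15)
- //        | (off & 0x7fff);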
-ENTRY(hyper_thash)
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_THASH);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- shr.u r20 = r8, 61
- addl r25 = 1, r0
- movl r17 = 0xe000000000000000
- ;;
- and r21 = r17, r8 // VHPT_Addr1
- ;;
- shladd r28 = r20, 3, r18
- adds r19 = XSI_PTA_OFS-XSI_PSR_IC_OFS, r18
- ;;
- adds r27 = XSI_RR0_OFS-XSI_PSR_IC_OFS, r28
- addl r28 = 32767, r0
- ld8 r24 = [r19] // pta
- ;;
- ld8 r23 = [r27] // rrs[vadr>>61]
- extr.u r26 = r24, IA64_PTA_SIZE_BIT, IA64_PTA_SIZE_LEN
- ;;
- extr.u r22 = r23, IA64_RR_PS, IA64_RR_PS_LEN
- shl r30 = r25, r26
- ;;
- shr.u r19 = r8, r22
- shr.u r29 = r24, 15
- ;;
- adds r17 = -1, r30
- ;;
- shladd r27 = r19, 3, r0
- extr.u r26 = r17, 15, 46
- ;;
- andcm r24 = r29, r26
- and r19 = r28, r27
- shr.u r25 = r27, 15
- ;;
- and r23 = r26, r25
- ;;
- or r22 = r24, r23
- ;;
- dep.z r20 = r22, 15, 46
- ;;
- or r16 = r20, r21
- ;;
- or r8 = r19, r16
- // done, update iip/ipsr to next instruction
- mov r24=cr.ipsr
- mov r25=cr.iip;;
- extr.u r26=r24,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r25=16,r25
-(p7) adds r26=1,r26
- ;;
- dep r24=r26,r24,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r24
- mov cr.iip=r25
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_thash)
-
-ENTRY(hyper_ptc_ga)
-#ifndef FAST_PTC_GA
- br.spnt.few dispatch_break_fault ;;
-#endif
- // FIXME: validate not flushing Xen addresses
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_PTC_GA);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
- movl r21=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r21=[r21];;
- adds r22=IA64_VCPU_VHPT_PG_SHIFT_OFFSET,r21
- mov r28=r8
- extr.u r19=r9,2,6 // addr_range=1<<((r9&0xfc)>>2)
- mov r20=1
- shr.u r24=r8,61
- movl r26=0x8000000000000000 // INVALID_TI_TAG
- mov r30=ar.lc
- ;;
- ld1 r22=[r22] // current->arch.vhpt_pg_shift
- shl r19=r20,r19
- cmp.eq p7,p0=7,r24
-(p7) br.spnt.many dispatch_break_fault ;; // slow way for rr7
- ;;
- shl r27=r22,2 // vhpt_pg_shift<<2 (for ptc.ga)
- shr.u r23=r19,r22 // repeat loop for n pages
- cmp.le p7,p0=r19,r0 // skip flush if size<=0
-(p7) br.cond.dpnt 2f ;;
- shl r24=r23,r22;;
- cmp.ne p7,p0=r24,r23 ;;
-(p7) adds r23=1,r23 ;; // n_pages<size<n_pages+1? extra iter
- mov ar.lc=r23
- shl r29=r20,r22;; // page_size
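- // per page: invalidate the matching VHPT entry by or-ing
- // INVALID_TI_TAG into its ti_tag word, then ptc.ga the page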
-1:
- thash r25=r28 ;;
- adds r25=16,r25 ;;
- ld8 r24=[r25] ;;
- // FIXME: should check if tag matches, not just blow it away
- or r24=r26,r24 ;; // vhpt_entry->ti_tag |= INVALID_TI_TAG
- st8 [r25]=r24
- ptc.ga r28,r27 ;;
- srlz.i ;;
- add r28=r29,r28
- br.cloop.sptk.few 1b
- ;;
-2:
- mov ar.lc=r30 ;;
- mov r29=cr.ipsr
- mov r30=cr.iip;;
- adds r25=IA64_VCPU_DTLB_OFFSET,r21
- adds r26=IA64_VCPU_ITLB_OFFSET,r21;;
- ld8 r24=[r25]
- ld8 r27=[r26] ;;
- and r24=-2,r24
- and r27=-2,r27 ;;
- st8 [r25]=r24 // set 1-entry i/dtlb as not present
- st8 [r26]=r27 ;;
- // increment to point to next instruction
- extr.u r26=r29,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r30=16,r30
-(p7) adds r26=1,r26
- ;;
- dep r29=r26,r29,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r29
- mov cr.iip=r30
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(hyper_ptc_ga)
-
-// recovery block for hyper_itc metaphysical memory lookup
-ENTRY(recover_and_dispatch_break_fault)
-#ifdef PERF_COUNTERS
- movl r21=PERFC(recover_to_break_fault);;
- ld4 r22=[r21];;
- adds r22=1,r22;;
- st4 [r21]=r22;;
-#endif
- mov b0=r29 ;;
- br.sptk.many dispatch_break_fault;;
-END(recover_and_dispatch_break_fault)
-
-// Registers at entry
-// r17 = break immediate (HYPERPRIVOP_ITC_D or I)
-// r18 == XSI_PSR_IC_OFS
-// r31 == pr
-ENTRY(hyper_itc)
-hyper_itc_i:
- // fall through, hyper_itc_d handles both i and d
-hyper_itc_d:
-#ifndef FAST_ITC
- br.sptk.many dispatch_break_fault ;;
-#else
- // ensure itir.ps >= xen's pagesize
- movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r27=[r27];;
- adds r22=IA64_VCPU_VHPT_PG_SHIFT_OFFSET,r27
- adds r23=XSI_ITIR_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld1 r22=[r22]
- ld8 r23=[r23];;
- extr.u r24=r23,IA64_ITIR_PS,IA64_ITIR_PS_LEN;; // r24==logps
- cmp.gt p7,p0=r22,r24
-(p7) br.spnt.many dispatch_break_fault ;;
- adds r21=XSI_IFA_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r21=[r21];;
- // for now, punt on region0 inserts
- extr.u r21=r21,61,3;;
- cmp.eq p7,p0=r21,r0
-(p7) br.spnt.many dispatch_break_fault ;;
- adds r27=IA64_VCPU_DOMAIN_OFFSET,r27;;
- ld8 r27=[r27]
-// FIXME: is the global var dom0 always pinned? assume so for now
- movl r28=dom0;;
- ld8 r28=[r28];;
-// FIXME: for now, only handle dom0 (see lookup_domain_mpa below)
- cmp.ne p7,p0=r27,r28
-(p7) br.spnt.many dispatch_break_fault ;;
- // note: p6/p7 select r17 below even when FAST_HYPERPRIVOP_CNT is
- // compiled out, so set them unconditionally
- cmp.eq p6,p7=HYPERPRIVOP_ITC_D,r17;;
-#ifdef FAST_HYPERPRIVOP_CNT
-(p6) movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_ITC_D)
-(p7) movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_ITC_I);;
- ld4 r21=[r20];;
- adds r21=1,r21;;
- st4 [r20]=r21;;
-#endif
-(p6) mov r17=2;;
-(p7) mov r17=3;;
- mov r29=b0 ;;
- movl r30=recover_and_dispatch_break_fault ;;
- mov r16=r8;;
- // fall through
-#endif
-END(hyper_itc)
-
-#if defined(FAST_ITC) || defined (FAST_TLB_MISS_REFLECT)
-
-// fast_insert(PSCB(ifa),r24=ps,r16=pte)
-// r16 == pte
-// r17 == bit0: 1=inst, 0=data; bit1: 1=itc, 0=vcpu_translate
-// r18 == XSI_PSR_IC_OFS
-// r24 == ps
-// r29 == saved value of b0 in case of recovery
-// r30 == recovery ip if failure occurs
-// r31 == pr
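- //
- // outline: fast_insert runs translate_domain_pte (mask/merge the pte
- // against the domain's machine memory), then vcpu_itc_no_srlz
- // (itc.d/itc.i plus a hand-rolled vhpt_insert), then vcpu_set_tr_entry
- // to shadow the mapping in the vcpu's 1-entry i/dtlb, and finally
- // saves the original pte in PSCBX(vcpu, i/dtlb_pte)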
-ENTRY(fast_insert)
- // translate_domain_pte(r16=pteval,PSCB(ifa)=address,r24=itir)
- mov r19=1
- movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- shl r20=r19,r24
- ld8 r27=[r27];;
- adds r23=IA64_VCPU_VHPT_PG_SHIFT_OFFSET,r27
- adds r20=-1,r20 // r20 == mask
- movl r19=_PAGE_PPN_MASK;;
- ld1 r23=[r23]
- mov r25=-1
- and r22=r16,r19;; // r22 == pteval & _PAGE_PPN_MASK
- andcm r19=r22,r20
- shl r25=r25,r23 // -1 << current->arch.vhpt_pg_shift
- adds r21=XSI_IFA_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r21=[r21];;
- and r20=r21,r20;;
- or r19=r19,r20;; // r19 == mpaddr
-// FIXME: for now, just do domain0 and skip mpaddr range checks
- and r20=r25,r19
- movl r21=PAGE_PHYS ;;
- or r20=r20,r21 ;; // r20==return value from lookup_domain_mpa
- // r16=pteval,r20=pteval2
- movl r19=_PAGE_PPN_MASK
- movl r21=_PAGE_PL_PRIV;;
- andcm r25=r16,r19 // r25==pteval & ~_PAGE_PPN_MASK
- and r22=r20,r19;;
- or r22=r22,r21;;
- or r22=r22,r25;; // r22==return value from translate_domain_pte
- // done with translate_domain_pte
- // now do vcpu_itc_no_srlz(vcpu,IorD,ifa,r22=pte,r16=mppte,r24=logps)
-// FIXME: for now, just domain0 and skip range check
- // psr.ic already cleared
- // NOTE: r24 still contains ps (from above)
- shladd r24=r24,2,r0;;
- mov cr.itir=r24
- adds r23=XSI_IFA_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r23=[r23];;
- mov cr.ifa=r23
- tbit.z p6,p7=r17,0;;
-(p6) itc.d r22
-(p7) itc.i r22;;
- dv_serialize_data
- // vhpt_insert(r23=vaddr,r22=pte,r24=logps<<2)
- thash r28=r23
- or r26=1,r22;;
- ttag r21=r23
- adds r25=8,r28
- mov r19=r28;;
- st8 [r25]=r24
- adds r20=16,r28;;
- st8 [r19]=r26
- st8 [r20]=r21;;
- // vcpu_set_tr_entry(trp,r22=pte|1,r24=itir,r23=ifa)
- // TR_ENTRY = {page_flags,itir,addr,rid}
- tbit.z p6,p7=r17,0
- adds r28=IA64_VCPU_STARTING_RID_OFFSET,r27
-(p6) adds r27=IA64_VCPU_DTLB_OFFSET,r27
-(p7) adds r27=IA64_VCPU_ITLB_OFFSET,r27;;
- st8 [r27]=r22,8;; // page_flags: already has pl >= 2 and p==1
- st8 [r27]=r24,8 // itir
- mov r19=-4096;;
- and r23=r23,r19;;
- st8 [r27]=r23,8 // ifa & ~0xfff
- adds r29 = XSI_RR0_OFS-XSI_PSR_IC_OFS,r18
- extr.u r25=r23,61,3;;
- shladd r29=r25,3,r29;;
- ld8 r29=[r29]
- movl r20=IA64_RR_RID_MASK;;
- and r29=r29,r20;;
- st8 [r27]=r29,-8;; // rid
- //if ps > 12
- cmp.eq p7,p0=12<<IA64_ITIR_PS,r24
-(p7) br.cond.sptk.many 1f;;
- // if (ps > 12) {
- // trp->ppn &= ~((1UL<<(ps-12))-1); trp->vadr &= ~((1UL<<ps)-1); }
- extr.u r29=r24,IA64_ITIR_PS,IA64_ITIR_PS_LEN
- mov r28=1;;
- shl r26=r28,r29;;
- adds r29=-12,r29;;
- shl r25=r28,r29;;
- mov r29=-1
- adds r26=-1,r26
- adds r25=-1,r25;;
- andcm r26=r29,r26 // ~((1UL<<ps)-1)
- andcm r25=r29,r25;; // ~((1UL<<(ps-12))-1)
- ld8 r29=[r27];;
- and r29=r29,r26;;
- st8 [r27]=r29,-16;;
- ld8 r29=[r27];;
- extr.u r28=r29,12,38;;
- movl r26=0xfffc000000000fff;;
- and r29=r29,r26
- and r28=r28,r25;;
- shl r28=r28,12;;
- or r29=r29,r28;;
- st8 [r27]=r29;;
-1: // done with vcpu_set_tr_entry
- //PSCBX(vcpu,i/dtlb_pte) = mp_pte
- movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r27=[r27];;
- tbit.z p6,p7=r17,0;;
-(p6) adds r27=IA64_VCPU_DTLB_PTE_OFFSET,r27
-(p7) adds r27=IA64_VCPU_ITLB_PTE_OFFSET,r27;;
- st8 [r27]=r16;;
- // done with vcpu_itc_no_srlz
-
- // if hyper_itc, increment to point to next instruction
- tbit.z p7,p0=r17,1
-(p7) br.cond.sptk.few no_inc_iip;;
-
- mov r29=cr.ipsr
- mov r30=cr.iip;;
- extr.u r26=r29,IA64_PSR_RI_BIT,2 ;;
- cmp.eq p6,p7=2,r26 ;;
-(p6) mov r26=0
-(p6) adds r30=16,r30
-(p7) adds r26=1,r26
- ;;
- dep r29=r26,r29,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r29
- mov cr.iip=r30;;
-
-no_inc_iip:
- mov pr=r31,-1 ;;
- rfi
- ;;
-END(fast_insert)
-#endif
diff --git a/xen/arch/ia64/xen/idle0_task.c b/xen/arch/ia64/xen/idle0_task.c
deleted file mode 100644
index 0708f7d385..0000000000
--- a/xen/arch/ia64/xen/idle0_task.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/desc.h>
-
-#define IDLE_VCPU(_v) \
-{ \
- .processor = 0, \
- .domain = NULL \
-}
-
-/*
- * Initial task structure.
- *
- * We need to make sure that this is properly aligned due to the way process
- * stacks are handled.
- * This is done by having a special ".data.init_task" section...
- *
- * init_task_mem shouldn't be used directly. The corresponding address in
- * the identity mapping area should be used.
- * I.e. __va(ia64_tpa(init_task_mem)) should be used.
- */
-union {
- struct {
- struct vcpu task;
- } s;
- unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem __attribute__((section(".data.init_task"))) = {{
- .task = IDLE_VCPU(init_task_mem.s.task)
-}};
diff --git a/xen/arch/ia64/xen/irq.c b/xen/arch/ia64/xen/irq.c
deleted file mode 100644
index 4cf0dd7a42..0000000000
--- a/xen/arch/ia64/xen/irq.c
+++ /dev/null
@@ -1,633 +0,0 @@
-/*
- * linux/arch/ia64/kernel/irq.c
- *
- * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
- *
- * This file contains the code used by various IRQ handling routines:
- * asking for different IRQ's should be done through these routines
- * instead of just grabbing them. Thus setups with different IRQ numbers
- * shouldn't result in any weird surprises, and installing new handlers
- * should be easier.
- *
- * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
- *
- * 4/14/2004: Added code to handle cpu migration and do safe irq
- * migration without losing interrupts for iosapic
- * architecture.
- */
-
-/*
- * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
- *
- * IRQs are in fact implemented a bit like signal handlers for the kernel.
- * Naturally it's not a 1:1 relation, but there are similarities.
- */
-
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/seq_file.h>
-
-#include <asm/atomic.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
-#include <asm/pgalloc.h>
-#include <asm/delay.h>
-#include <xen/irq.h>
-#include <asm/hw_irq.h>
-
-#include <xen/event.h>
-#define apicid_to_phys_cpu_present(x) 1
-
-#ifdef CONFIG_IA64_GENERIC
-unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
-{
- return (unsigned int) vec;
-}
-#endif
-
-/*
- * Linux has a controller-independent x86 interrupt architecture.
- * Every controller has a 'controller-template' that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
- * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
- * (IO-APICs assumed to be messaging to Pentium local-APICs)
- *
- * The code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic.
- */
-
-/*
- * Controller mappings for all interrupt sources:
- */
-irq_desc_t irq_desc[NR_IRQS];
-
-int __init arch_init_one_irq_desc(struct irq_desc *desc)
-{
- if (!alloc_cpumask_var(&desc->arch.cpu_mask))
- return -ENOMEM;
-
- desc->arch.vector = -1;
- cpumask_setall(desc->arch.cpu_mask);
-
- return 0;
-}
-
-int __init init_irq_data(void)
-{
- unsigned int irq;
-
- for (irq = 0; irq < NR_IRQS; irq++) {
- struct irq_desc *desc = irq_to_desc(irq);
-
- desc->irq = irq;
- if (init_one_irq_desc(desc))
- BUG();
- }
-
- return 0;
-}
-
-void __do_IRQ_guest(int irq);
-
-/*
- * Special irq handlers.
- */
-
-static void ack_none(struct irq_desc *desc)
-{
-/*
- * 'What should we do if we get a hw irq event on an illegal vector?'
- * Each architecture has to answer this itself; it doesn't deserve
- * a generic callback, I think.
- */
- printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", desc->irq, smp_processor_id());
-}
-
-hw_irq_controller no_irq_type = {
- .typename = "none",
- .startup = irq_startup_none,
- .shutdown = irq_shutdown_none,
- .enable = irq_enable_none,
- .disable = irq_disable_none,
- .ack = ack_none,
- .end = irq_actor_none
-};
-
-/*
- * Generic enable/disable code: this just calls
- * down into the PIC-specific version for the actual
- * hardware disable after having gotten the irq
- * controller lock.
- */
-
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
- irq_desc_t *desc = irq_desc + irq;
- struct irqaction * action;
- unsigned int status;
-
- if (likely(desc->status & IRQ_PER_CPU)) {
- /*
- * No locking required for CPU-local interrupts:
- */
- desc->handler->ack(desc);
- local_irq_enable();
- desc->action->handler(irq, desc->action->dev_id, regs);
- local_irq_disable();
- desc->handler->end(desc);
- return 1;
- }
-
- spin_lock(&desc->lock);
-
- if (desc->status & IRQ_GUEST) {
- __do_IRQ_guest(irq);
- spin_unlock(&desc->lock);
- return 1;
- }
-
- desc->handler->ack(desc);
- status = desc->status & ~IRQ_REPLAY;
- status |= IRQ_PENDING; /* we _want_ to handle it */
-
- /*
- * If the IRQ is disabled for whatever reason, we cannot
- * use the action we have.
- */
- action = NULL;
- if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
- action = desc->action;
- status &= ~IRQ_PENDING; /* we commit to handling */
- status |= IRQ_INPROGRESS; /* we are handling it */
- }
- desc->status = status;
-
- /*
- * If there is no IRQ handler or it was disabled, exit early.
- * Since we set PENDING, if another processor is handling
- * a different instance of this same irq, the other processor
- * will take care of it.
- */
- if (unlikely(!action))
- goto out;
-
- /*
- * Edge triggered interrupts need to remember
- * pending events.
- * This applies to any hw interrupts that allow a second
- * instance of the same irq to arrive while we are in do_IRQ
- * or in the handler. But the code here only handles the _second_
- * instance of the irq, not the third or fourth. So it is mostly
- * useful for irq hardware that does not mask cleanly in an
- * SMP environment.
- */
- for (;;) {
- spin_unlock_irq(&desc->lock);
- action->handler(irq, action->dev_id, regs);
- spin_lock_irq(&desc->lock);
-
- if (likely(!(desc->status & IRQ_PENDING)))
- break;
-
- desc->status &= ~IRQ_PENDING;
- }
- desc->status &= ~IRQ_INPROGRESS;
-
-out:
- /*
- * The ->end() handler has to deal with interrupts which got
- * disabled while the handler was running.
- */
- desc->handler->end(desc);
- spin_unlock(&desc->lock);
-
- return 1;
-}
-
-/*
- * IRQ autodetection code..
- *
- * This depends on the fact that any interrupt that
- * comes in on to an unassigned handler will get stuck
- * with "IRQ_WAITING" cleared and the interrupt
- * disabled.
- */
-
-int setup_vector(unsigned int vector, struct irqaction * new)
-{
- unsigned long flags;
- struct irqaction *old, **p;
- irq_desc_t *desc = irq_descp(vector);
-
- /*
- * The following block of code has to be executed atomically
- */
- spin_lock_irqsave(&desc->lock,flags);
- p = &desc->action;
- if ((old = *p) != NULL) {
- spin_unlock_irqrestore(&desc->lock,flags);
- return -EBUSY;
- }
-
- *p = new;
-
- desc->arch.depth = 0;
- desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_GUEST);
- desc->handler->startup(desc);
- desc->handler->enable(desc);
- desc->arch.vector = vector;
- spin_unlock_irqrestore(&desc->lock,flags);
-
- return 0;
-}
-
-/* Vectors reserved by xen (and thus not sharable with domains). */
-unsigned long ia64_xen_vector[BITS_TO_LONGS(NR_IRQS)];
-
-int __init setup_irq_vector(unsigned int vec, struct irqaction * new)
-{
- int res;
-
- if ( vec == IA64_INVALID_VECTOR )
- return -ENOSYS;
- /* Reserve the vector (and thus the irq). */
- if (test_and_set_bit(vec, ia64_xen_vector))
- return -EBUSY;
- res = setup_vector (vec, new);
- return res;
-}
-
-void __init release_irq_vector(unsigned int vec)
-{
- unsigned long flags;
- irq_desc_t *desc;
-
- if ( vec == IA64_INVALID_VECTOR )
- return;
-
- desc = irq_descp(vec);
-
- spin_lock_irqsave(&desc->lock, flags);
- clear_bit(vec, ia64_xen_vector);
- desc->action = NULL;
- desc->arch.depth = 1;
- desc->status |= IRQ_DISABLED;
- desc->handler->shutdown(desc);
- desc->arch.vector = -1;
- spin_unlock_irqrestore(&desc->lock, flags);
-
- while (desc->status & IRQ_INPROGRESS)
- cpu_relax();
-}
-
-/*
- * HANDLING OF GUEST-BOUND PHYSICAL IRQS
- */
-
-#define IRQ_MAX_GUESTS 7
-typedef struct {
- u8 nr_guests;
- u8 in_flight;
- u8 shareable;
- u8 ack_type;
-#define ACKTYPE_NONE 0 /* No final acknowledgement is required */
-#define ACKTYPE_UNMASK 1 /* Unmask notification is required */
- struct domain *guest[IRQ_MAX_GUESTS];
-} irq_guest_action_t;
-
-static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
-{
- if ( d->arch.pirq_eoi_map )
- set_bit(irq, d->arch.pirq_eoi_map);
-}
-
-static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
-{
- if ( d->arch.pirq_eoi_map )
- clear_bit(irq, d->arch.pirq_eoi_map);
-}
-
-static void _irq_guest_eoi(irq_desc_t *desc)
-{
- irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
- unsigned int i, vector = desc - irq_desc;
-
- if ( !(desc->status & IRQ_GUEST_EOI_PENDING) )
- return;
-
- for ( i = 0; i < action->nr_guests; ++i )
- clear_pirq_eoi(action->guest[i], vector);
-
- desc->status &= ~(IRQ_INPROGRESS|IRQ_GUEST_EOI_PENDING);
- desc->handler->enable(desc);
-}
-
-static struct timer irq_guest_eoi_timer[NR_IRQS];
-static void irq_guest_eoi_timer_fn(void *data)
-{
- irq_desc_t *desc = data;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock, flags);
- _irq_guest_eoi(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-
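-/*
- * If every target guest already has this pirq pending, re-sending it
- * achieves nothing: mask the line and arm a 1ms timer that unmasks it
- * (via _irq_guest_eoi) unless a guest EOIs it first.
- */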
-void __do_IRQ_guest(int irq)
-{
- irq_desc_t *desc = &irq_desc[irq];
- irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
- struct domain *d;
- struct pirq *pirq;
- int i, already_pending = 0;
-
- for ( i = 0; i < action->nr_guests; i++ )
- {
- d = action->guest[i];
- pirq = pirq_info(d, irq);
- if ( (action->ack_type != ACKTYPE_NONE) &&
- !test_and_set_bool(pirq->masked) )
- action->in_flight++;
- if ( hvm_do_IRQ_dpci(d, pirq) )
- {
- if ( action->ack_type == ACKTYPE_NONE )
- {
- already_pending += !!(desc->status & IRQ_INPROGRESS);
- desc->status |= IRQ_INPROGRESS; /* cleared during hvm eoi */
- }
- }
- else if ( send_guest_pirq(d, pirq) &&
- (action->ack_type == ACKTYPE_NONE) )
- {
- already_pending++;
- }
- }
-
- if ( already_pending == action->nr_guests )
- {
- stop_timer(&irq_guest_eoi_timer[irq]);
- desc->handler->disable(desc);
- desc->status |= IRQ_GUEST_EOI_PENDING;
- for ( i = 0; i < already_pending; ++i )
- {
- d = action->guest[i];
- set_pirq_eoi(d, irq);
- /*
- * Could check here whether the guest unmasked the event by now
- * (or perhaps just re-issue the send_guest_pirq()), and if it
- * can now accept the event,
- * - clear all the pirq_eoi bits we already set,
- * - re-enable the vector, and
- * - skip the timer setup below.
- */
- }
- init_timer(&irq_guest_eoi_timer[irq],
- irq_guest_eoi_timer_fn, desc, smp_processor_id());
- set_timer(&irq_guest_eoi_timer[irq], NOW() + MILLISECS(1));
- }
-}
-
-static int pirq_acktype(int irq)
-{
- irq_desc_t *desc = &irq_desc[irq];
-
- if (!strcmp(desc->handler->typename, "IO-SAPIC-level"))
- return ACKTYPE_UNMASK;
-
- if (!strcmp(desc->handler->typename, "IO-SAPIC-edge"))
- return ACKTYPE_NONE;
-
- return ACKTYPE_NONE;
-}
-
-void pirq_guest_eoi(struct pirq *pirq)
-{
- irq_desc_t *desc;
- irq_guest_action_t *action;
-
- desc = &irq_desc[pirq->pirq];
- spin_lock_irq(&desc->lock);
- action = (irq_guest_action_t *)desc->action;
-
- if ( action->ack_type == ACKTYPE_NONE )
- {
- ASSERT(!pirq->masked);
- stop_timer(&irq_guest_eoi_timer[pirq->pirq]);
- _irq_guest_eoi(desc);
- }
-
- if ( test_and_clear_bool(pirq->masked) && (--action->in_flight == 0) )
- {
- ASSERT(action->ack_type == ACKTYPE_UNMASK);
- desc->handler->end(desc);
- }
- spin_unlock_irq(&desc->lock);
-}
-
-int pirq_guest_unmask(struct domain *d)
-{
- unsigned int pirq = 0, n, i;
- struct pirq *pirqs[16];
- shared_info_t *s = d->shared_info;
-
- do {
- n = radix_tree_gang_lookup(&d->pirq_tree, (void **)pirqs, pirq,
- ARRAY_SIZE(pirqs));
- for ( i = 0; i < n; ++i )
- {
- pirq = pirqs[i]->pirq;
- if ( pirqs[i]->masked &&
- !test_bit(pirqs[i]->evtchn, &s->evtchn_mask[0]) )
- pirq_guest_eoi(pirqs[i]);
- }
- } while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
-
- return 0;
-}
-
-int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share)
-{
- irq_desc_t *desc = &irq_desc[pirq->pirq];
- irq_guest_action_t *action;
- unsigned long flags;
- int rc = 0;
-
- spin_lock_irqsave(&desc->lock, flags);
-
- if (desc->handler == &no_irq_type) {
- spin_unlock_irqrestore(&desc->lock, flags);
- return -ENOSYS;
- }
-
- action = (irq_guest_action_t *)desc->action;
-
- if ( !(desc->status & IRQ_GUEST) )
- {
- if ( desc->action != NULL )
- {
- gdprintk(XENLOG_INFO,
- "Cannot bind IRQ %d to guest. In use by '%s'.\n",
- pirq->pirq, desc->action->name);
- rc = -EBUSY;
- goto out;
- }
-
- action = xmalloc(irq_guest_action_t);
- if ( (desc->action = (struct irqaction *)action) == NULL )
- {
- gdprintk(XENLOG_INFO,
- "Cannot bind IRQ %d to guest. Out of memory.\n",
- pirq->pirq);
- rc = -ENOMEM;
- goto out;
- }
-
- action->nr_guests = 0;
- action->in_flight = 0;
- action->shareable = will_share;
- action->ack_type = pirq_acktype(pirq->pirq);
-
- desc->arch.depth = 0;
- desc->status |= IRQ_GUEST;
- desc->status &= ~IRQ_DISABLED;
- desc->handler->startup(desc);
-
- /* Attempt to bind the interrupt target to the correct CPU. */
-#if 0 /* FIXME CONFIG_SMP ??? */
- if ( desc->handler->set_affinity != NULL )
- desc->handler->set_affinity(
- irq, apicid_to_phys_cpu_present(d->processor));
-#endif
- }
- else if ( !will_share || !action->shareable )
- {
- gdprintk(XENLOG_INFO,
- "Cannot bind IRQ %d to guest. Will not share with others.\n",
- pirq->pirq);
- rc = -EBUSY;
- goto out;
- }
-
- if ( action->nr_guests == IRQ_MAX_GUESTS )
- {
- gdprintk(XENLOG_INFO,
- "Cannot bind IRQ %d to guest. Already at max share.\n",
- pirq->pirq);
- rc = -EBUSY;
- goto out;
- }
-
- action->guest[action->nr_guests++] = v->domain;
-
- if ( action->ack_type != ACKTYPE_NONE )
- set_pirq_eoi(v->domain, pirq->pirq);
- else
- clear_pirq_eoi(v->domain, pirq->pirq);
-
- out:
- spin_unlock_irqrestore(&desc->lock, flags);
- return rc;
-}
-
-void pirq_guest_unbind(struct domain *d, struct pirq *pirq)
-{
- irq_desc_t *desc = &irq_desc[pirq->pirq];
- irq_guest_action_t *action;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&desc->lock, flags);
-
- action = (irq_guest_action_t *)desc->action;
-
- for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
- continue;
- BUG_ON(i == action->nr_guests);
- memmove(&action->guest[i], &action->guest[i+1],
-         (action->nr_guests-i-1) * sizeof(action->guest[0]));
- action->nr_guests--;
-
- if ( action->ack_type == ACKTYPE_UNMASK )
- if ( test_and_clear_bool(pirq->masked) &&
- (--action->in_flight == 0) )
- desc->handler->end(desc);
-
- if ( !action->nr_guests )
- {
- BUG_ON(action->in_flight != 0);
- desc->action = NULL;
- xfree(action);
- desc->arch.depth = 1;
- desc->status |= IRQ_DISABLED;
- desc->status &= ~IRQ_GUEST;
- desc->handler->shutdown(desc);
- }
-
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-void
-xen_debug_irq(unsigned long vector, struct pt_regs *regs)
-{
-//FIXME: For debug only, can be removed
- static char firstirq = 1;
- static char firsttime[256];
- static char firstpend[256];
- if (firstirq) {
- int i;
- for (i=0;i<256;i++) firsttime[i] = 1;
- for (i=0;i<256;i++) firstpend[i] = 1;
- firstirq = 0;
- }
- if (firsttime[vector]) {
- printk("**** (entry) First received int on vector=%lu,itc=%lx\n",
- (unsigned long) vector, ia64_get_itc());
- firsttime[vector] = 0;
- }
-}
-
-void pirq_set_affinity(struct domain *d, int irq, const cpumask_t *mask)
-{
- /* FIXME */
-}
-
-void (pirq_cleanup_check)(struct pirq *pirq, struct domain *d)
-{
- /*
- * Check whether all fields have their default values, and delete
- * the entry from the tree if so.
- *
- * NB: Common parts were already checked.
- */
- if ( !pt_pirq_cleanup_check(&pirq->arch.dpci) )
- return;
-
- if ( radix_tree_delete(&d->pirq_tree, pirq->pirq) != pirq )
- BUG();
-}
-/*
- * Exit an interrupt context. Process softirqs if needed and possible:
- */
-void irq_exit(void)
-{
- preempt_count() -= IRQ_EXIT_OFFSET;/* sub_preempt_count(IRQ_EXIT_OFFSET); */
-}
diff --git a/xen/arch/ia64/xen/ivt.S b/xen/arch/ia64/xen/ivt.S
deleted file mode 100644
index e6feb539ad..0000000000
--- a/xen/arch/ia64/xen/ivt.S
+++ /dev/null
@@ -1,1435 +0,0 @@
-#include <asm/debugger.h>
-#include <asm/vhpt.h>
-#include <public/arch-ia64.h>
-/*
- * arch/ia64/kernel/ivt.S
- *
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger <davidm@hpl.hp.com>
- * Copyright (C) 2000, 2002-2003 Intel Co
- * Asit Mallick <asit.k.mallick@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Kenneth Chen <kenneth.w.chen@intel.com>
- * Fenghua Yu <fenghua.yu@intel.com>
- *
- * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
- * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now
- * uses virtual PT.
- */
-/*
- * This file defines the interruption vector table used by the CPU.
- * It does not include one entry per possible cause of interruption.
- *
- * The first 20 entries of the table contain 64 bundles each while the
- * remaining 48 entries contain only 16 bundles each.
- *
- * The 64 bundles are used to allow inlining the whole handler for critical
- * interruptions like TLB misses.
- *
- * For each entry, the comment is as follows:
- *
- * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
- * entry offset ----/ / / / /
- * entry number ---------/ / / /
- * size of the entry -------------/ / /
- * vector name -------------------------------------/ /
- * interruptions triggering this vector ----------------------/
- *
- * The table is 32KB in size and must be aligned on 32KB boundary.
- * (The CPU ignores the 15 lower bits of the address)
- *
- * Table is based upon EAS2.6 (Oct 1999)
- */
-
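-/*
- * A sketch (not in the original source) of the offset rule described
- * above, matching the .org directives used throughout this file:
- *
- *	unsigned long ivt_offset(unsigned int n)
- *	{
- *		return n < 20 ? n * 0x400 : 20 * 0x400 + (n - 20) * 0x100;
- *	}
- */
-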
-#include <asm/asmmacro.h>
-#include <asm/break.h>
-#include <asm/ia32.h>
-#include <asm/kregs.h>
-#include <asm/offsets.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/thread_info.h>
-#include <asm/unistd.h>
-#include <xen/errno.h>
-#include <linux/efi.h>
-
-#if 1
-# define PSR_DEFAULT_BITS psr.ac
-#else
-# define PSR_DEFAULT_BITS 0
-#endif
-
-#if 0
- /*
- * This lets you track the last eight faults that occurred on the CPU.
- * Make sure ar.k2 isn't needed for something else before enabling this...
- */
-# define DBG_FAULT(i) \
- mov r16=ar.k2;; \
- shl r16=r16,8;; \
- add r16=(i),r16;; \
- mov ar.k2=r16
-#else
-# define DBG_FAULT(i)
-#endif
-
-#define MINSTATE_VIRT /* needed by minstate.h */
-#include "minstate.h"
-
-#define FAULT(n) \
- mov r19=n; /* prepare to save predicates */ \
- mov r31=pr; \
- br.sptk.many dispatch_to_fault_handler
-
-#define FAULT_OR_REFLECT(n) \
- mov r20=cr.ipsr; \
- mov r19=n; /* prepare to save predicates */ \
- mov r31=pr;; \
- extr.u r20=r20,IA64_PSR_CPL0_BIT,2;; \
- cmp.ne p6,p0=r0,r20; /* cpl != 0?*/ \
-(p6) br.dptk.many dispatch_reflection; \
- br.sptk.few dispatch_to_fault_handler
-
- .section .text.ivt,"ax"
-
- .align 32768 // align on 32KB boundary
- .global ia64_ivt
-ia64_ivt:
-//////////////////////////////////////////////////////////////////////////
-// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
-ENTRY(vhpt_miss)
- DBG_FAULT(0)
- FAULT(0)
-END(vhpt_miss)
-
- .org ia64_ivt+0x400
-//////////////////////////////////////////////////////////////////////////
-// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
-ENTRY(itlb_miss)
- DBG_FAULT(1)
- mov r16 = cr.ifa
- mov r31 = pr
- ;;
- extr.u r17=r16,59,5
- ;;
- /* If address belongs to VMM, go to alt tlb handler */
- cmp.eq p6,p0=0x1e,r17
-(p6) br.cond.spnt late_alt_itlb_miss
- br.cond.sptk fast_tlb_miss_reflect
- ;;
-END(itlb_miss)
-
- .org ia64_ivt+0x0800
-//////////////////////////////////////////////////////////////////////////
-// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
-ENTRY(dtlb_miss)
- DBG_FAULT(2)
- mov r16=cr.ifa // get virtual address
- mov r31=pr
- ;;
- extr.u r17=r16,59,5
- ;;
- /* If address belongs to VMM, go to alt tlb handler */
- cmp.eq p6,p0=0x1e,r17
-(p6) br.cond.spnt late_alt_dtlb_miss
- br.cond.sptk fast_tlb_miss_reflect
- ;;
-END(dtlb_miss)
-
- .org ia64_ivt+0x0c00
-//////////////////////////////////////////////////////////////////////////
-// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
-ENTRY(alt_itlb_miss)
- DBG_FAULT(3)
- mov r16=cr.ifa // get address that caused the TLB miss
- mov r31=pr
- ;;
-late_alt_itlb_miss:
- mov r21=cr.ipsr
- movl r17=PAGE_KERNEL
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- ;;
- mov r20=cr.itir
- extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
- and r19=r19,r16 // clear ed, reserved bits, and PTE ctrl bits
- extr.u r18=r16,XEN_VIRT_UC_BIT,1 // extract UC bit
- ;;
- cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
- or r19=r17,r19 // insert PTE control bits into r19
- dep r20=0,r20,IA64_ITIR_KEY,IA64_ITIR_KEY_LEN // clear the key
- ;;
- dep r19=r18,r19,4,1 // set bit 4 (uncached) if access to UC area.
- mov cr.itir=r20 // set itir with cleared key
-(p8) br.cond.spnt page_fault
- ;;
- itc.i r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
-END(alt_itlb_miss)
-
- .org ia64_ivt+0x1000
-//////////////////////////////////////////////////////////////////////////
-// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
-ENTRY(alt_dtlb_miss)
- DBG_FAULT(4)
- mov r16=cr.ifa // get address that caused the TLB miss
- mov r31=pr
- ;;
-late_alt_dtlb_miss:
- mov r20=cr.isr
- movl r17=PAGE_KERNEL
- mov r29=cr.ipsr // frametable_miss is shared by paravirtual and HVM sides
- // and it assumes ipsr is saved in r29. If you change the
- // register usage here, please check both sides!
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- ;;
- extr.u r23=r29,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
- and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
- tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
- extr.u r18=r16,XEN_VIRT_UC_BIT,1 // extract UC bit
- and r19=r19,r16 // clear ed, reserved bits, and
- // PTE control bits
- tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
- ;;
- cmp.ne p8,p0=r0,r23
-(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
-(p8) br.cond.spnt page_fault
- ;;
- mov r20=cr.itir
-#ifdef CONFIG_VIRTUAL_FRAME_TABLE
- shr r22=r16,56 // Test for the address of virtual frame_table
- ;;
- cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
-(p8) br.cond.sptk frametable_miss ;;
-#endif
- // !( (r22 == 0x18 && rr6 == XEN_EFI_RR6) ||
- // (r22 == 0x1c && rr7 == XEN_EFI_RR7) ||
- // r22 == 0x1e)
-
- extr.u r22=r16,59,5
- ;;
- dep r20=0,r20,IA64_ITIR_KEY,IA64_ITIR_KEY_LEN // clear the key
- movl r24=6 << 61
- movl r23=7 << 61
- ;;
- mov r24=rr[r24]
- mov r23=rr[r23]
- ;;
- movl r26=XEN_EFI_RR6
- movl r25=XEN_EFI_RR7
-
- cmp.eq p8,p0=0x18,r22 // 0xc...
- cmp.eq p9,p0=0x1c,r22 // 0xe...
- ;;
- cmp.eq.and p8,p0=r26,r24 // rr6 == XEN_EFI_RR6
- cmp.eq.and p9,p0=r25,r23 // rr7 == XEN_EFI_RR7
- ;;
- cmp.eq.or p9,p0=0x1e,r22 // 0xf...
-(p8) br.cond.spnt alt_dtlb_miss_identity_map
-(p9) br.cond.spnt alt_dtlb_miss_identity_map
- br.cond.spnt page_fault
- ;;
-alt_dtlb_miss_identity_map:
- dep r29=-1,r29,IA64_PSR_ED_BIT,1
- or r19=r19,r17 // insert PTE control bits into r19
- mov cr.itir=r20 // set itir with cleared key
- ;;
- cmp.ne p8,p0=r0,r18 // Xen UC bit set
- ;;
- cmp.eq.or p8,p0=0x18,r22 // Region 6 is UC for EFI
- ;;
-(p8) dep r19=-1,r19,4,1 // set bit 4 (uncached) if access to UC area
-(p6) mov cr.ipsr=r29
- ;;
-(p7) itc.d r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
-END(alt_dtlb_miss)
-
-#ifdef CONFIG_VIRTUAL_FRAME_TABLE
-GLOBAL_ENTRY(frametable_miss)
- rsm psr.dt // switch to using physical data addressing
- movl r24=(frametable_pg_dir-PAGE_OFFSET) // r24=__pa(frametable_pg_dir)
- ;;
- srlz.d
- extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
- ;;
- shladd r24=r17,3,r24 // r24=&pgd[pgd_offset(addr)]
- ;;
- ld8 r24=[r24] // r24=pgd[pgd_offset(addr)]
- extr.u r18=r16,PMD_SHIFT,(PAGE_SHIFT-3) // r18=pmd_offset
- ;;
- cmp.eq p6,p7=0,r24 // pgd present?
- shladd r24=r18,3,r24 // r24=&pmd[pmd_offset(addr)]
- ;;
-(p7) ld8 r24=[r24] // r24=pmd[pmd_offset(addr)]
- extr.u r19=r16,PAGE_SHIFT,(PAGE_SHIFT-3)// r19=pte_offset
-(p6) br.spnt.few frametable_fault
- ;;
- cmp.eq p6,p7=0,r24 // pmd present?
- shladd r24=r19,3,r24 // r24=&pte[pte_offset(addr)]
- ;;
-(p7) ld8 r24=[r24] // r24=pte[pte_offset(addr)]
- mov r25=(PAGE_SHIFT<<IA64_ITIR_PS)
-(p6) br.spnt.few frametable_fault
- ;;
- mov cr.itir=r25
- ssm psr.dt // switch to using virtual data addressing
- tbit.z p6,p7=r24,_PAGE_P_BIT // pte present?
- ;;
-(p7) itc.d r24 // install updated PTE
-(p6) br.spnt.few frametable_fault // page present bit cleared?
- ;;
- mov pr=r31,-1 // restore predicate registers
- rfi
-END(frametable_miss)
-
-ENTRY(frametable_fault) //ipsr saved in r29 before coming here!
- ssm psr.dt // switch to using virtual data addressing
- mov r18=cr.iip
- movl r19=ia64_frametable_probe
- ;;
- cmp.eq p6,p7=r18,r19 // is faulting address ia64_frametable_probe?
- mov r8=0 // assumes that 'probe.r' uses r8
- dep r29=-1,r29,IA64_PSR_RI_BIT+1,1 // return to next instruction in
- // bundle 2
- ;;
-(p6) mov cr.ipsr=r29
- mov r19=4 // FAULT(4)
-(p7) br.spnt.few dispatch_to_fault_handler
- ;;
- mov pr=r31,-1
- rfi
-END(frametable_fault)
-
-GLOBAL_ENTRY(ia64_frametable_probe)
- {
- probe.r r8=r32,0 // destination register must be r8
- nop.f 0x0
- br.ret.sptk.many b0 // this instruction must be in bundle 2
- }
-END(ia64_frametable_probe)
-#endif /* CONFIG_VIRTUAL_FRAME_TABLE */
-
- .org ia64_ivt+0x1400
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
-ENTRY(nested_dtlb_miss)
- DBG_FAULT(5)
- mov b0=r30
- br.sptk.many b0 // return to the continuation point
- ;;
-END(nested_dtlb_miss)
-
-GLOBAL_ENTRY(dispatch_reflection)
- /*
- * Input:
- * psr.ic: off
- * r19: intr type (offset into ivt, see ia64_int.h)
- * r31: contains saved predicates (pr)
- */
- SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,5,0
- mov out4=r15
- mov out0=cr.ifa
- adds out1=16,sp
- mov out2=cr.isr
- mov out3=cr.iim
-
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption
- // collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer
- ;;
- SAVE_REST
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
-// br.sptk.many ia64_prepare_handle_reflection // TODO: why commented out?
- br.call.sptk.many b6=ia64_handle_reflection
-END(dispatch_reflection)
-
- .org ia64_ivt+0x1800
-//////////////////////////////////////////////////////////////////////////
-// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
-ENTRY(ikey_miss)
- DBG_FAULT(6)
- FAULT_OR_REFLECT(6)
-END(ikey_miss)
-
- //----------------------------------------------------------------
- // call do_page_fault (predicates are in r31, psr.dt may be off,
- // r16 is faulting address)
-GLOBAL_ENTRY(page_fault)
- ssm psr.dt
- ;;
- srlz.i
- ;;
- SAVE_MIN_WITH_COVER
- alloc r15=ar.pfs,0,0,4,0
- mov out0=cr.ifa
- mov out1=cr.isr
- mov out3=cr.itir
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption
- // collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- movl r14=ia64_leave_kernel
- ;;
- SAVE_REST
- mov rp=r14
- ;;
- adds out2=16,r12 // out2 = pointer to pt_regs
- br.call.sptk.many b6=ia64_do_page_fault // ignore return address
-END(page_fault)
-
- .org ia64_ivt+0x1c00
-//////////////////////////////////////////////////////////////////////////
-// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
-ENTRY(dkey_miss)
- DBG_FAULT(7)
- FAULT_OR_REFLECT(7)
-END(dkey_miss)
-
- .org ia64_ivt+0x2000
-//////////////////////////////////////////////////////////////////////////
-// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
-ENTRY(dirty_bit)
- DBG_FAULT(8)
- mov r20=cr.ipsr
- mov r31=pr
- ;;
- extr.u r20=r20,IA64_PSR_CPL0_BIT,2
- ;;
- mov r19=8 // prepare to save predicates
- cmp.eq p6,p0=r0,r20 // cpl == 0?
-(p6) br.sptk.few dispatch_to_fault_handler
- // If shadow mode is not enabled, reflect the fault.
- movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
- ;;
- ld8 r22=[r22]
- ;;
- add r22=IA64_VCPU_SHADOW_BITMAP_OFFSET,r22
- ;;
- ld8 r22=[r22]
- ;;
- cmp.eq p6,p0=r0,r22 // !shadow_bitmap ?
-(p6) br.dptk.many dispatch_reflection
-
- SAVE_MIN_WITH_COVER
- alloc r14=ar.pfs,0,0,4,0
- mov out0=cr.ifa
- mov out1=cr.itir
- mov out2=cr.isr
- adds out3=16,sp
-
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption
- // collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer
- ;;
- SAVE_REST
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.call.sptk.many b6=ia64_shadow_fault
-END(dirty_bit)
-
- .org ia64_ivt+0x2400
-//////////////////////////////////////////////////////////////////////////
-// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
-ENTRY(iaccess_bit)
- DBG_FAULT(9)
- mov r16=cr.isr
- mov r17=cr.ifa
- mov r31=pr
- mov r19=9
- mov r20=0x2400
- br.sptk.many fast_access_reflect;;
-END(iaccess_bit)
-
- .org ia64_ivt+0x2800
-//////////////////////////////////////////////////////////////////////////
-// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
-ENTRY(daccess_bit)
- DBG_FAULT(10)
- mov r16=cr.isr
- mov r17=cr.ifa
- mov r18=cr.ipsr
- mov r31=pr
- mov r19=10
- ;;
- mov r20=0x2800
- extr.u r18=r18,IA64_PSR_CPL0_BIT,2
- ;;
- cmp.ne p6,p0=r0,r18 /* cpl != 0? */
-(p6) br.sptk.many fast_access_reflect
- /* __domain_get_bundle() may cause this fault. */
- br.sptk.few dispatch_to_fault_handler
- ;;
-END(daccess_bit)
-
- .org ia64_ivt+0x2c00
-//////////////////////////////////////////////////////////////////////////
-// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
-ENTRY(break_fault)
- .body
- /*
- * The streamlined system call entry/exit paths only save/restore
- * the initial part of pt_regs. This implies that the callers of
- * system-calls must adhere to the normal procedure calling
- * conventions.
- *
- * Registers to be saved & restored:
- * CR registers: cr.ipsr, cr.iip, cr.ifs
- * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore,
- * ar.fpsr
- * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
- * Registers to be restored only:
- * r8-r11: output value from the system call.
- *
- * During system call exit, scratch registers (including r15) are
- * modified/cleared to prevent leaking bits from kernel to user
- * level.
- */
- DBG_FAULT(11)
- mov r16=cr.isr
- mov r17=cr.iim
- mov r31=pr
- ;;
- cmp.eq p7,p0=r17,r0
-(p7) br.spnt.few dispatch_break_fault
- ;;
-#ifdef CRASH_DEBUG
- // A panic can occur before domain0 is created. In such cases,
- // referencing XSI_PSR_IC causes nested_dtlb_miss.
- movl r18=CDB_BREAK_NUM
- ;;
- cmp.eq p7,p0=r17,r18
- ;;
-(p7) br.spnt.few dispatch_break_fault
- ;;
-#endif
- movl r18=THIS_CPU(current_psr_ic_addr)
- ;;
- ld8 r18=[r18]
- ;;
-#ifdef CONFIG_PRIVIFY
- // pseudo-covers are replaced by break.b, which (unfortunately)
- // always clears iim.
- cmp.eq p7,p0=r0,r17
-(p7) br.spnt.many dispatch_privop_fault
- ;;
-#endif
- // if (ipsr.cpl == CONFIG_CPL0_EMUL &&
- // (iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX)
- // this is a hyperprivop. A hyperprivop is hand-coded assembly with
- // psr.ic off which means it can make no calls, cannot use r1-r15,
- // and it can have no memory accesses unless they are to pinned
- // addresses!
- mov r19= cr.ipsr
- mov r20=HYPERPRIVOP_START
- mov r21=HYPERPRIVOP_MAX
- ;;
- sub r20=r17,r20
- extr.u r19=r19,IA64_PSR_CPL0_BIT,2 // extract cpl field from cr.ipsr
- ;;
- cmp.gtu p7,p0=r21,r20
- ;;
- cmp.eq.and p7,p0=CONFIG_CPL0_EMUL,r19 // ipsr.cpl==CONFIG_CPL0_EMUL
-(p7) br.sptk.many fast_hyperprivop
- ;;
- movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
- ;;
- ld8 r22 = [r22]
- ;;
- adds r23=IA64_VCPU_BREAKIMM_OFFSET,r22
- ;;
- ld4 r23=[r23];;
- cmp4.eq p6,p0=r23,r17;; // Xen-reserved breakimm?
- cmp.eq.and p6,p0=CONFIG_CPL0_EMUL,r19
-(p6) br.spnt.many fast_hypercall
- ;;
- br.sptk.many fast_break_reflect
- ;;
-
-
-fast_hypercall:
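- // only hypercall numbers 0..255 are taken on the fast path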
- shr r25=r2,8;;
- cmp.ne p7,p0=r0,r25
-(p7) br.spnt.few dispatch_break_fault
- ;;
- // fall through
-
-
- /*
- * The streamlined system call entry/exit paths only save/restore the initial part
- * of pt_regs. This implies that the callers of system-calls must adhere to the
- * normal procedure calling conventions.
- *
- * Registers to be saved & restored:
- * CR registers: cr.ipsr, cr.iip, cr.ifs
- * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
- * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
- * Registers to be restored only:
- * r8-r11: output value from the system call.
- *
- * During system call exit, scratch registers (including r15) are modified/cleared
- * to prevent leaking bits from kernel to user level.
- */
-
-// DBG_FAULT(11)
-// mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
- mov r16=r22
- mov r29=cr.ipsr // M2 (12 cyc)
-// mov r31=pr // I0 (2 cyc)
- mov r15=r2
-
-// mov r17=cr.iim // M2 (2 cyc)
- mov.m r27=ar.rsc // M2 (12 cyc)
-// mov r18=__IA64_BREAK_SYSCALL // A
-
- mov.m ar.rsc=0 // M2
- mov.m r21=ar.fpsr // M2 (12 cyc)
- mov r19=b6 // I0 (2 cyc)
- ;;
- mov.m r23=ar.bspstore // M2 (12 cyc)
- mov.m r24=ar.rnat // M2 (5 cyc)
- mov.i r26=ar.pfs // I0 (2 cyc)
-
- invala // M0|1
- nop.m 0 // M
- mov r20=r1 // A save r1
-
- nop.m 0
-// movl r30=sys_call_table // X
- movl r30=ia64_hypercall_table // X
-
- mov r28=cr.iip // M2 (2 cyc)
-// cmp.eq p0,p7=r18,r17 // I0 is this a system call?
-//(p7) br.cond.spnt non_syscall // B no ->
- //
- // From this point on, we are definitely on the syscall-path
- // and we can use (non-banked) scratch registers.
- //
-///////////////////////////////////////////////////////////////////////
- mov r1=r16 // A move task-pointer to "addl"-addressable reg
- mov r2=r16 // A setup r2 for ia64_syscall_setup
-// add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
-
- adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
-// adds r15=-1024,r15 // A subtract 1024 from syscall number
-// mov r3=NR_syscalls - 1
- mov r3=NR_hypercalls - 1
- ;;
- ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
-// ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
- mov r9=r0 // force flags = 0
- extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
-
- shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024)
- addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
- cmp.leu p6,p7=r15,r3 // A syscall number in range?
- ;;
-
- lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
-(p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
- tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
-
- mov.m ar.bspstore=r22 // M2 switch to kernel RBS
- cmp.eq p8,p9=2,r8 // A isr.ei==2?
- ;;
-
-(p8) mov r8=0 // A clear ei to 0
-//(p7) movl r30=sys_ni_syscall // X
-(p7) movl r30=do_ni_hypercall // X
-
-(p8) adds r28=16,r28 // A switch cr.iip to next bundle
-(p9) adds r8=1,r8 // A increment ei to next slot
- nop.i 0
- ;;
-
- mov.m r25=ar.unat // M2 (5 cyc)
- dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
-// adds r15=1024,r15 // A restore original syscall number
- //
- // If any of the above loads miss in L1D, we'll stall here until
- // the data arrives.
- //
-///////////////////////////////////////////////////////////////////////
- st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
- mov b6=r30 // I0 setup syscall handler branch reg early
- cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
-
-// and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
- mov r18=ar.bsp // M2 (12 cyc)
- ;;
-(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
-// cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
- br.call.sptk.many b7=ia64_syscall_setup // B
-1:
- mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
- nop 0
- bsw.1 // B (6 cyc) regs are saved, switch to bank 1
- ;;
-
- PT_REGS_UNWIND_INFO(-48)
- ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
-// movl r3=ia64_ret_from_syscall // X
- ;;
-
- srlz.i // M0 ensure interruption collection is on
-// mov rp=r3 // I0 set the real return addr
-//(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
-(p15) ssm psr.i // M2 restore psr.i
-//(p14)	br.call.sptk.many b6=b6		// B    invoke syscall-handler (ignore return addr)
-//	br.call.sptk.many b6=b6			// B    invoke syscall-handler (ignore return addr)
-	br.call.sptk.many b0=b6			// B    invoke syscall-handler (ignore return addr)
-//	br.cond.spnt.many ia64_trace_syscall	// B	do syscall-tracing thingamajig
- ;;
- adds r2=PT(R8)+16,r12
- ;;
- st8 [r2]=r8
- ;;
- br.call.sptk.many b0=do_softirq
- ;;
- //restore hypercall argument if continuation
- adds r2=IA64_VCPU_HYPERCALL_CONTINUATION_OFS,r13
- ;;
- ld1 r20=[r2]
- ;;
- st1 [r2]=r0
- ;;
- cmp.ne p6,p0=r20,r0
- ;;
-(p6) adds r2=PT(R16)+16,r12
-(p6) adds r3=PT(R17)+16,r12
- ;;
-(p6) ld8 r32=[r2],16
-(p6) ld8 r33=[r3],16
- ;;
-(p6) ld8 r34=[r2],16
-(p6) ld8 r35=[r3],16
- ;;
-(p6) ld8 r36=[r2],16
- ;;
-//save ar.bsp before cover
- mov r16=ar.bsp
- add r2=PT(R14)+16,r12
- ;;
- st8 [r2]=r16
- ;;
- rsm psr.i|psr.ic
- ;;
- srlz.i
- ;;
- cover
- ;;
- mov r20=cr.ifs
- adds r2=PT(CR_IFS)+16,r12
- ;;
- st8 [r2]=r20
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i
- ;;
- br.call.sptk.many b0=reflect_event
- ;;
- rsm psr.i|psr.ic
- adds r2=PT(R14)+16,r12
- adds r3=PT(R8)+16,r12
- ;;
- //r16 contains ar.bsp before cover
- ld8 r16=[r2]
- ld8 r8=[r3]
- srlz.i
- ;;
- br.sptk.many ia64_ret_from_syscall
- ;;
-END(break_fault)
-
- .org ia64_ivt+0x3000
-//////////////////////////////////////////////////////////////////////////
-// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
-ENTRY(interrupt)
- DBG_FAULT(12)
- mov r31=pr // prepare to save predicates
- mov r30=cr.ivr // pass cr.ivr as first arg
-	// FIXME: this is a hack... use cpuinfo.ksoftirqd because it's
-	// not used anywhere else and we need a place to stash ivr and
-	// there are no registers available unused by SAVE_MIN/REST
- movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET
- ;;
- st8 [r29]=r30
- movl r28=slow_interrupt
- ;;
- mov r29=rp
- ;;
- mov rp=r28
- ;;
- br.cond.sptk.many fast_tick_reflect
- ;;
-slow_interrupt:
- mov rp=r29;;
- SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- srlz.i // ensure everybody knows psr.ic is back on
- ;;
- SAVE_REST
- ;;
- alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
- movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
- ld8 out0=[out0];;
- add out1=16,sp // pass pointer to pt_regs as second arg
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.call.sptk.many b6=ia64_handle_irq
-END(interrupt)
-
- .org ia64_ivt+0x3400
-//////////////////////////////////////////////////////////////////////////
-// 0x3400 Entry 13 (size 64 bundles) Reserved
- DBG_FAULT(13)
- FAULT(13)
-
- // There is no particular reason for this code to be here, other
- // than that there happens to be space here that would go unused
- // otherwise. If this fault ever gets "unreserved", simply move
- // the following code to a more suitable spot...
-
-GLOBAL_ENTRY(dispatch_break_fault)
- SAVE_MIN_WITH_COVER
- ;;
-dispatch_break_fault_post_save:
- alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
- mov out0=cr.ifa
- adds out1=16,sp
- mov out2=cr.isr // FIXME: pity to make this slow access twice
- mov out3=cr.iim // FIXME: pity to make this slow access twice
-
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer
- ;;
- SAVE_REST
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.call.sptk.many b6=ia64_handle_break
-END(dispatch_break_fault)
-
- .org ia64_ivt+0x3800
-//////////////////////////////////////////////////////////////////////////
-// 0x3800 Entry 14 (size 64 bundles) Reserved
- DBG_FAULT(14)
- FAULT(14)
-
- // this code segment is from 2.6.16.13
-
- /*
- * There is no particular reason for this code to be here, other than that
- * there happens to be space here that would go unused otherwise. If this
- * fault ever gets "unreserved", simply move the following code to a more
- * suitable spot...
- *
- * ia64_syscall_setup() is a separate subroutine so that it can
- * allocate stacked registers so it can safely demine any
- * potential NaT values from the input registers.
- *
- * On entry:
- * - executing on bank 0 or bank 1 register set (doesn't matter)
- * - r1: stack pointer
- * - r2: current task pointer
- * - r3: preserved
- * - r11: original contents (saved ar.pfs to be saved)
- * - r12: original contents (sp to be saved)
- * - r13: original contents (tp to be saved)
- * - r15: original contents (syscall # to be saved)
- * - r18: saved bsp (after switching to kernel stack)
- * - r19: saved b6
- * - r20: saved r1 (gp)
- * - r21: saved ar.fpsr
- * - r22: kernel's register backing store base (krbs_base)
- * - r23: saved ar.bspstore
- * - r24: saved ar.rnat
- * - r25: saved ar.unat
- * - r26: saved ar.pfs
- * - r27: saved ar.rsc
- * - r28: saved cr.iip
- * - r29: saved cr.ipsr
- * - r31: saved pr
- * - b0: original contents (to be saved)
- * On exit:
- * - p10: TRUE if syscall is invoked with more than 8 out
- * registers or r15's Nat is true
- * - r1: kernel's gp
- * - r3: preserved (same as on entry)
- * - r8: -EINVAL if p10 is true
- * - r12: points to kernel stack
- * - r13: points to current task
- * - r14: preserved (same as on entry)
- * - p13: preserved
- * - p15: TRUE if interrupts need to be re-enabled
- * - ar.fpsr: set to kernel settings
- * - b6: preserved (same as on entry)
- */
-GLOBAL_ENTRY(ia64_syscall_setup)
-#if PT(B6) != 0
-# error This code assumes that b6 is the first field in pt_regs.
-#endif
- st8 [r1]=r19 // save b6
- add r16=PT(CR_IPSR),r1 // initialize first base pointer
- add r17=PT(R11),r1 // initialize second base pointer
- ;;
- alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
- st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
- tnat.nz p8,p0=in0
-
- st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
- tnat.nz p9,p0=in1
-(pKStk) mov r18=r0 // make sure r18 isn't NaT
- ;;
-
- st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
- st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
- mov r28=b0 // save b0 (2 cyc)
- ;;
-
- st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
- dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
-(p8) mov in0=-1
- ;;
-
- st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
- extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
- and r8=0x7f,r19 // A // get sof of ar.pfs
-
- st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
- tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
-(p9) mov in1=-1
- ;;
-
-(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
- tnat.nz p10,p0=in2
- add r11=8,r11
- ;;
-(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
-(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
- tnat.nz p11,p0=in3
- ;;
-(p10) mov in2=-1
- tnat.nz p12,p0=in4 // [I0]
-(p11) mov in3=-1
- ;;
-(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
-(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
- shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
- ;;
- st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
- st8 [r17]=r28,PT(R1)-PT(B0) // save b0
- tnat.nz p13,p0=in5 // [I0]
- ;;
- st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
- st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
-(p12) mov in4=-1
- ;;
-
-.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
-.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
-(p13) mov in5=-1
- ;;
- st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
- tnat.nz p13,p0=in6
- cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
- ;;
- mov r8=1
-(p9) tnat.nz p10,p0=r15
- adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
-
- st8.spill [r17]=r15 // save r15
- tnat.nz p8,p0=in7
- nop.i 0
-
- mov r13=r2 // establish `current'
- movl r1=__gp // establish kernel global pointer
- ;;
- st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
-(p13) mov in6=-1
-(p8) mov in7=-1
-
- cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
- movl r17=FPSR_DEFAULT
- ;;
- mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
-(p10) mov r8=-EINVAL
- br.ret.sptk.many b7
-END(ia64_syscall_setup)
-
-
- .org ia64_ivt+0x3c00
-//////////////////////////////////////////////////////////////////////////
-// 0x3c00 Entry 15 (size 64 bundles) Reserved
- DBG_FAULT(15)
- FAULT(15)
-
-
- .org ia64_ivt+0x4000
-//////////////////////////////////////////////////////////////////////////
-// 0x4000 Entry 16 (size 64 bundles) Reserved
- DBG_FAULT(16)
- FAULT(16)
-
- // There is no particular reason for this code to be here, other
- // than that there happens to be space here that would go unused
- // otherwise. If this fault ever gets "unreserved", simply move
- // the following code to a more suitable spot...
-
-ENTRY(dispatch_privop_fault)
- SAVE_MIN_WITH_COVER
- ;;
- alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in
- // insn group!)
- mov out0=cr.ifa
- adds out1=16,sp
- mov out2=cr.isr // FIXME: pity to make this slow access twice
- mov out3=cr.itir
-
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption
- // collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer
- ;;
- SAVE_REST
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.call.sptk.many b6=ia64_handle_privop
-END(dispatch_privop_fault)
-
-
- .org ia64_ivt+0x4400
-//////////////////////////////////////////////////////////////////////////
-// 0x4400 Entry 17 (size 64 bundles) Reserved
- DBG_FAULT(17)
- FAULT(17)
-
-
- .org ia64_ivt+0x4800
-//////////////////////////////////////////////////////////////////////////
-// 0x4800 Entry 18 (size 64 bundles) Reserved
- DBG_FAULT(18)
- FAULT(18)
-
-
- .org ia64_ivt+0x4c00
-//////////////////////////////////////////////////////////////////////////
-// 0x4c00 Entry 19 (size 64 bundles) Reserved
- DBG_FAULT(19)
- FAULT(19)
-
- /*
- * There is no particular reason for this code to be here, other
- * than that there happens to be space here that would go unused
- * otherwise. If this fault ever gets "unreserved", simply move
- * the following code to a more suitable spot...
- */
-
-GLOBAL_ENTRY(dispatch_to_fault_handler)
- /*
- * Input:
- * psr.ic: off
- * r19: fault vector number (e.g., 24 for General Exception)
- * r31: contains saved predicates (pr)
- */
- SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,5,0
- mov out0=r15
- mov out1=cr.isr
- mov out2=cr.ifa
- mov out3=cr.iim
- mov out4=cr.itir
- ;;
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption
- // collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer for
- // SAVE_REST
- ;;
- SAVE_REST
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.call.sptk.many b6=ia64_fault
-END(dispatch_to_fault_handler)
-
-//
-// --- End of long entries, Beginning of short entries
-//
-
- .org ia64_ivt+0x5000
-//////////////////////////////////////////////////////////////////////////
-// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
-ENTRY(page_not_present)
- DBG_FAULT(20)
- FAULT_OR_REFLECT(20)
-END(page_not_present)
-
- .org ia64_ivt+0x5100
-//////////////////////////////////////////////////////////////////////////
-// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
-ENTRY(key_permission)
- DBG_FAULT(21)
- FAULT_OR_REFLECT(21)
-END(key_permission)
-
- .org ia64_ivt+0x5200
-//////////////////////////////////////////////////////////////////////////
-// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
-ENTRY(iaccess_rights)
- DBG_FAULT(22)
- FAULT_OR_REFLECT(22)
-END(iaccess_rights)
-
- .org ia64_ivt+0x5300
-//////////////////////////////////////////////////////////////////////////
-// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
-ENTRY(daccess_rights)
- DBG_FAULT(23)
- mov r31=pr
- mov r16=cr.isr
- mov r17=cr.ifa
- mov r19=23
- mov r20=0x5300
- br.sptk.many fast_access_reflect
- ;;
-END(daccess_rights)
-
- .org ia64_ivt+0x5400
-//////////////////////////////////////////////////////////////////////////
-// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
-ENTRY(general_exception)
- DBG_FAULT(24)
- mov r16=cr.isr
- mov r31=pr
- ;;
- cmp4.ge p6,p0=0x20,r16
-(p6) br.sptk.many dispatch_privop_fault
- ;;
- FAULT_OR_REFLECT(24)
-END(general_exception)
-
- .org ia64_ivt+0x5500
-//////////////////////////////////////////////////////////////////////////
-// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
-ENTRY(disabled_fp_reg)
- DBG_FAULT(25)
- FAULT_OR_REFLECT(25)
-END(disabled_fp_reg)
-
- .org ia64_ivt+0x5600
-//////////////////////////////////////////////////////////////////////////
-// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
-ENTRY(nat_consumption)
- DBG_FAULT(26)
- FAULT_OR_REFLECT(26)
-END(nat_consumption)
-
- .org ia64_ivt+0x5700
-//////////////////////////////////////////////////////////////////////////
-// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
-ENTRY(speculation_vector)
- DBG_FAULT(27)
- // this probably need not reflect...
- FAULT_OR_REFLECT(27)
-END(speculation_vector)
-
- .org ia64_ivt+0x5800
-//////////////////////////////////////////////////////////////////////////
-// 0x5800 Entry 28 (size 16 bundles) Reserved
- DBG_FAULT(28)
- FAULT(28)
-
- .org ia64_ivt+0x5900
-//////////////////////////////////////////////////////////////////////////
-// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
-ENTRY(debug_vector)
- DBG_FAULT(29)
- FAULT_OR_REFLECT(29)
-END(debug_vector)
-
- .org ia64_ivt+0x5a00
-//////////////////////////////////////////////////////////////////////////
-// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
-ENTRY(unaligned_access)
- DBG_FAULT(30)
- FAULT_OR_REFLECT(30)
-END(unaligned_access)
-
- .org ia64_ivt+0x5b00
-//////////////////////////////////////////////////////////////////////////
-// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
-ENTRY(unsupported_data_reference)
- DBG_FAULT(31)
- FAULT_OR_REFLECT(31)
-END(unsupported_data_reference)
-
- .org ia64_ivt+0x5c00
-//////////////////////////////////////////////////////////////////////////
-// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
-ENTRY(floating_point_fault)
- DBG_FAULT(32)
- FAULT_OR_REFLECT(32)
-END(floating_point_fault)
-
- .org ia64_ivt+0x5d00
-//////////////////////////////////////////////////////////////////////////
-// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
-ENTRY(floating_point_trap)
- DBG_FAULT(33)
- FAULT_OR_REFLECT(33)
-END(floating_point_trap)
-
- .org ia64_ivt+0x5e00
-//////////////////////////////////////////////////////////////////////////
-// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
-ENTRY(lower_privilege_trap)
- DBG_FAULT(34)
- FAULT_OR_REFLECT(34)
-END(lower_privilege_trap)
-
- .org ia64_ivt+0x5f00
-//////////////////////////////////////////////////////////////////////////
-// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
-ENTRY(taken_branch_trap)
- DBG_FAULT(35)
- FAULT_OR_REFLECT(35)
-END(taken_branch_trap)
-
- .org ia64_ivt+0x6000
-//////////////////////////////////////////////////////////////////////////
-// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
-ENTRY(single_step_trap)
- DBG_FAULT(36)
- FAULT_OR_REFLECT(36)
-END(single_step_trap)
-
- .org ia64_ivt+0x6100
-//////////////////////////////////////////////////////////////////////////
-// 0x6100 Entry 37 (size 16 bundles) Reserved
- DBG_FAULT(37)
- FAULT(37)
-
- .org ia64_ivt+0x6200
-//////////////////////////////////////////////////////////////////////////
-// 0x6200 Entry 38 (size 16 bundles) Reserved
- DBG_FAULT(38)
- FAULT(38)
-
- .org ia64_ivt+0x6300
-//////////////////////////////////////////////////////////////////////////
-// 0x6300 Entry 39 (size 16 bundles) Reserved
- DBG_FAULT(39)
- FAULT(39)
-
- .org ia64_ivt+0x6400
-//////////////////////////////////////////////////////////////////////////
-// 0x6400 Entry 40 (size 16 bundles) Reserved
- DBG_FAULT(40)
- FAULT(40)
-
- .org ia64_ivt+0x6500
-//////////////////////////////////////////////////////////////////////////
-// 0x6500 Entry 41 (size 16 bundles) Reserved
- DBG_FAULT(41)
- FAULT(41)
-
- .org ia64_ivt+0x6600
-//////////////////////////////////////////////////////////////////////////
-// 0x6600 Entry 42 (size 16 bundles) Reserved
- DBG_FAULT(42)
- FAULT(42)
-
- .org ia64_ivt+0x6700
-//////////////////////////////////////////////////////////////////////////
-// 0x6700 Entry 43 (size 16 bundles) Reserved
- DBG_FAULT(43)
- FAULT(43)
-
- .org ia64_ivt+0x6800
-//////////////////////////////////////////////////////////////////////////
-// 0x6800 Entry 44 (size 16 bundles) Reserved
- DBG_FAULT(44)
- FAULT(44)
-
- .org ia64_ivt+0x6900
-//////////////////////////////////////////////////////////////////////////
-// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,
-// 44,58,60,61,62,72,
-// 73,75,76,77)
-ENTRY(ia32_exception)
- DBG_FAULT(45)
- FAULT_OR_REFLECT(45)
-END(ia32_exception)
-
- .org ia64_ivt+0x6a00
-//////////////////////////////////////////////////////////////////////////
-// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
-ENTRY(ia32_intercept)
- DBG_FAULT(46)
- FAULT_OR_REFLECT(46)
-END(ia32_intercept)
-
- .org ia64_ivt+0x6b00
-//////////////////////////////////////////////////////////////////////////
-// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
-ENTRY(ia32_interrupt)
- DBG_FAULT(47)
- FAULT_OR_REFLECT(47)
-END(ia32_interrupt)
-
- .org ia64_ivt+0x6c00
-//////////////////////////////////////////////////////////////////////////
-// 0x6c00 Entry 48 (size 16 bundles) Reserved
- DBG_FAULT(48)
- FAULT(48)
-
- .org ia64_ivt+0x6d00
-//////////////////////////////////////////////////////////////////////////
-// 0x6d00 Entry 49 (size 16 bundles) Reserved
- DBG_FAULT(49)
- FAULT(49)
-
- .org ia64_ivt+0x6e00
-//////////////////////////////////////////////////////////////////////////
-// 0x6e00 Entry 50 (size 16 bundles) Reserved
- DBG_FAULT(50)
- FAULT(50)
-
- .org ia64_ivt+0x6f00
-//////////////////////////////////////////////////////////////////////////
-// 0x6f00 Entry 51 (size 16 bundles) Reserved
- DBG_FAULT(51)
- FAULT(51)
-
- .org ia64_ivt+0x7000
-//////////////////////////////////////////////////////////////////////////
-// 0x7000 Entry 52 (size 16 bundles) Reserved
- DBG_FAULT(52)
- FAULT(52)
-
- .org ia64_ivt+0x7100
-//////////////////////////////////////////////////////////////////////////
-// 0x7100 Entry 53 (size 16 bundles) Reserved
- DBG_FAULT(53)
- FAULT(53)
-
- .org ia64_ivt+0x7200
-//////////////////////////////////////////////////////////////////////////
-// 0x7200 Entry 54 (size 16 bundles) Reserved
- DBG_FAULT(54)
- FAULT(54)
-
- .org ia64_ivt+0x7300
-//////////////////////////////////////////////////////////////////////////
-// 0x7300 Entry 55 (size 16 bundles) Reserved
- DBG_FAULT(55)
- FAULT(55)
-
- .org ia64_ivt+0x7400
-//////////////////////////////////////////////////////////////////////////
-// 0x7400 Entry 56 (size 16 bundles) Reserved
- DBG_FAULT(56)
- FAULT(56)
-
- .org ia64_ivt+0x7500
-//////////////////////////////////////////////////////////////////////////
-// 0x7500 Entry 57 (size 16 bundles) Reserved
- DBG_FAULT(57)
- FAULT(57)
-
- .org ia64_ivt+0x7600
-//////////////////////////////////////////////////////////////////////////
-// 0x7600 Entry 58 (size 16 bundles) Reserved
- DBG_FAULT(58)
- FAULT(58)
-
- .org ia64_ivt+0x7700
-//////////////////////////////////////////////////////////////////////////
-// 0x7700 Entry 59 (size 16 bundles) Reserved
- DBG_FAULT(59)
- FAULT(59)
-
- .org ia64_ivt+0x7800
-//////////////////////////////////////////////////////////////////////////
-// 0x7800 Entry 60 (size 16 bundles) Reserved
- DBG_FAULT(60)
- FAULT(60)
-
- .org ia64_ivt+0x7900
-//////////////////////////////////////////////////////////////////////////
-// 0x7900 Entry 61 (size 16 bundles) Reserved
- DBG_FAULT(61)
- FAULT(61)
-
- .org ia64_ivt+0x7a00
-//////////////////////////////////////////////////////////////////////////
-// 0x7a00 Entry 62 (size 16 bundles) Reserved
- DBG_FAULT(62)
- FAULT(62)
-
- .org ia64_ivt+0x7b00
-//////////////////////////////////////////////////////////////////////////
-// 0x7b00 Entry 63 (size 16 bundles) Reserved
- DBG_FAULT(63)
- FAULT(63)
-
- .org ia64_ivt+0x7c00
-//////////////////////////////////////////////////////////////////////////
-// 0x7c00 Entry 64 (size 16 bundles) Reserved
- DBG_FAULT(64)
- FAULT(64)
-
- .org ia64_ivt+0x7d00
-//////////////////////////////////////////////////////////////////////////
-// 0x7d00 Entry 65 (size 16 bundles) Reserved
- DBG_FAULT(65)
- FAULT(65)
-
- .org ia64_ivt+0x7e00
-//////////////////////////////////////////////////////////////////////////
-// 0x7e00 Entry 66 (size 16 bundles) Reserved
- DBG_FAULT(66)
- FAULT(66)
-
- .org ia64_ivt+0x7f00
-//////////////////////////////////////////////////////////////////////////
-// 0x7f00 Entry 67 (size 16 bundles) Reserved
- DBG_FAULT(67)
- FAULT(67)
-
- .org ia64_ivt+0x8000
diff --git a/xen/arch/ia64/xen/machine_kexec.c b/xen/arch/ia64/xen/machine_kexec.c
deleted file mode 100644
index 463db2c387..0000000000
--- a/xen/arch/ia64/xen/machine_kexec.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/******************************************************************************
- * machine_kexec.c
- *
- * Based on arch/ia64/kernel/machine_kexec.c from Linux 2.6.20-rc1
- *
- * Xen port written by:
- * - Simon 'Horms' Horman <horms@verge.net.au>
- * - Magnus Damm <magnus@valinux.co.jp>
- */
-
-#include <asm/smp.h>
-#include <xen/lib.h>
-#include <xen/types.h>
-#include <xen/smp.h>
-#include <xen/acpi.h>
-#include <public/kexec.h>
-#include <linux/efi.h>
-#include <asm/delay.h>
-#include <asm/meminit.h>
-#include <asm/hw_irq.h>
-#include <asm/kexec.h>
-#include <asm/vhpt.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-#include <asm/dom_fw_dom0.h>
-#include <asm-generic/sections.h>
-
-#define kexec_flush_icache_page(page) \
-do { \
- unsigned long page_addr = (unsigned long)page_address(page); \
- flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
-} while(0)
-
-int machine_kexec_load(int type, int slot, xen_kexec_image_t *image)
-{
- return 0;
-}
-
-void machine_kexec_unload(int type, int slot, xen_kexec_image_t *image)
-{
-}
-
-static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
-{
- xen_kexec_image_t *image = arg;
- int ii;
-
- /* Interrupts aren't acceptable while we reboot */
- local_irq_disable();
-
- /* Mask CMC and Performance Monitor interrupts */
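-	/* (bit 16 is the mask (m) bit in the ITV/PMV/CMCV/LRR register format) */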
- ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
- ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
-
- /* Mask ITV and Local Redirect Registers */
- ia64_set_itv(1 << 16);
- ia64_set_lrr0(1 << 16);
- ia64_set_lrr1(1 << 16);
-
- /* terminate possible nested in-service interrupts */
- for (ii = 0; ii < 16; ii++)
- ia64_eoi();
-
- /* unmask TPR and clear any pending interrupts */
- ia64_setreg(_IA64_REG_CR_TPR, 0);
- ia64_srlz_d();
- while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
- ia64_eoi();
- platform_kernel_launch_event();
- relocate_new_kernel(image->indirection_page, image->start_address,
- __pa(ia64_boot_param), image->reboot_code_buffer);
- BUG();
-}
-
-/* This should probably be an arch-hook called from kexec_exec().
- * It's also likely that it should be in the Xen equivalent of
- * arch/ia64/kernel/process.c */
-static void machine_shutdown(void)
-{
-#ifdef CONFIG_SMP
- unsigned int cpu;
-
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id())
- cpu_down(cpu);
- }
-#endif
- kexec_disable_iosapic();
- acpi_restore_tables();
-}
-
-void machine_kexec(xen_kexec_image_t *image)
-{
- machine_shutdown();
- unw_init_running(ia64_machine_kexec, image);
- for(;;);
-}
-
-void machine_reboot_kexec(xen_kexec_image_t *image)
-{
- machine_kexec(image);
-}
-
-int machine_kexec_get_xen(xen_kexec_range_t *range)
-{
- range->start = ia64_tpa(_text);
- range->size = (unsigned long)_end - (unsigned long)_text;
- return 0;
-}
-
-#define ELF_PAGE_SHIFT 16
-#define ELF_PAGE_SIZE (__IA64_UL_CONST(1) << ELF_PAGE_SHIFT)
-#define ELF_PAGE_MASK (~(ELF_PAGE_SIZE - 1))
-
-static int machine_kexec_get_xenheap(xen_kexec_range_t *range)
-{
- range->start = (ia64_tpa(_end) + (ELF_PAGE_SIZE - 1)) & ELF_PAGE_MASK;
- range->size =
- (((unsigned long)range->start + KERNEL_TR_PAGE_SIZE) &
- ~(KERNEL_TR_PAGE_SIZE - 1))
- - (unsigned long)range->start;
- return 0;
-}
-
-static int machine_kexec_get_boot_param(xen_kexec_range_t *range)
-{
- range->start = __pa(ia64_boot_param);
- range->size = sizeof(*ia64_boot_param);
- return 0;
-}
-
-static int machine_kexec_get_efi_memmap(xen_kexec_range_t *range)
-{
- range->start = ia64_boot_param->efi_memmap;
- range->size = ia64_boot_param->efi_memmap_size;
- return 0;
-}
-
-int machine_kexec_get(xen_kexec_range_t *range)
-{
- switch (range->range) {
- case KEXEC_RANGE_MA_XEN:
- return machine_kexec_get_xen(range);
- case KEXEC_RANGE_MA_XENHEAP:
- return machine_kexec_get_xenheap(range);
- case KEXEC_RANGE_MA_BOOT_PARAM:
- return machine_kexec_get_boot_param(range);
- case KEXEC_RANGE_MA_EFI_MEMMAP:
- return machine_kexec_get_efi_memmap(range);
- }
- return -EINVAL;
-}
-
-void arch_crash_save_vmcoreinfo(void)
-{
- VMCOREINFO_SYMBOL(dom_xen);
- VMCOREINFO_SYMBOL(dom_io);
- VMCOREINFO_SYMBOL(xen_pstart);
- VMCOREINFO_SYMBOL(frametable_pg_dir);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
deleted file mode 100644
index b50a14911e..0000000000
--- a/xen/arch/ia64/xen/mm.c
+++ /dev/null
@@ -1,3590 +0,0 @@
-/*
- * Copyright (C) 2005 Intel Co
- * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
- *
- * 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@intel.com> Add VTI domain support
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * dom0 vp model support
- */
-
-/*
- * NOTES on SMP
- *
- * * shared structures
- * There are some structures which are accessed by CPUs concurrently.
- * Here is the list of shared structures and operations on them which
- * read/write the structures.
- *
- * - struct page_info
- * This is a xen global resource. This structure is accessed by
- * any CPUs.
- *
- * operations on this structure:
- * - get_page() and its variant
- * - put_page() and its variant
- *
- * - vTLB
- * vcpu->arch.{d, i}tlb: Software tlb cache. These are per VCPU data.
- * DEFINE_PER_CPU (unsigned long, vhpt_paddr): VHPT table per physical CPU.
- *
- * domain_flush_vtlb_range() and domain_flush_vtlb_all()
- * write vcpu->arch.{d, i}tlb and the VHPT table of a vcpu which isn't
- * current, so there are potential races when reading/writing the VHPT
- * and vcpu->arch.{d, i}tlb. Note that the VHPT is read by the hardware
- * page table walker.
- *
- * operations on this structure:
- * - global tlb purge
- * vcpu_ptc_g(), vcpu_ptc_ga() and domain_page_flush_and_put()
- * I.e. callers of domain_flush_vtlb_range() and domain_flush_vtlb_all()
- * These functions invalidate VHPT entry and vcpu->arch.{i, d}tlb
- *
- * - tlb insert and fc
- * vcpu_itc_i()
- * vcpu_itc_d()
- * ia64_do_page_fault()
- * vcpu_fc()
- * These functions set VHPT entry and vcpu->arch.{i, d}tlb.
- * Actually vcpu_itc_no_srlz() does.
- *
- * - the P2M table
- * domain->mm and pgd, pud, pmd, pte table page.
- * This structure is used to convert domain pseudo physical address
- * to machine address. This is per domain resource.
- *
- * operations on this structure:
- * - populate the P2M table tree
- * lookup_alloc_domain_pte() and its variants.
- * - set p2m entry
- * assign_new_domain_page() and its variants.
- * assign_domain_page() and its variants.
- * - xchg p2m entry
- * assign_domain_page_replace()
- * - cmpxchg p2m entry
- * assign_domain_page_cmpxchg_rel()
- * replace_grant_host_mapping()
- * steal_page()
- * zap_domain_page_one()
- * - read p2m entry
- * lookup_alloc_domain_pte() and its variants.
- *
- * - the M2P table
- * mpt_table (or machine_to_phys_mapping)
- * This is a table which converts from machine address to pseudo physical
- * address. This is a global structure.
- *
- * operations on this structure:
- * - set m2p entry
- * set_gpfn_from_mfn()
- * - zap m2p entry
- * set_gpfn_from_mfn(INVALID_P2M_ENTRY)
- * - get m2p entry
- * get_gpfn_from_mfn()
- *
- *
- * * avoiding races
- * The resources which are shared by CPUs must be accessed carefully
- * to avoid race.
- * IA64 has weak memory ordering so that attention must be paid
- * to access shared structures. [SDM vol2 PartII chap. 2]
- *
- * - struct page_info memory ordering
- * get_page() has acquire semantics.
- * put_page() has release semantics.
- *
- * - populating the p2m table
- * pgd, pud, pmd are append only.
- *
- * - races when updating the P2M tables and the M2P table
- * The P2M entries are shared by more than one vcpu, so they are
- * accessed with atomic operations; i.e. xchg or cmpxchg must be used
- * to update a p2m entry.
- * NOTE: When creating/destroying a domain, we don't need to take care of
- * this race.
- *
- * The M2P table is inverse of the P2M table.
- * I.e. P2M(M2P(m)) = m and M2P(P2M(p)) = p
- * The M2P table and P2M table must be updated consistently.
- * Here is the update sequence
- *
- * xchg or cmpxchg case
- * - set_gpfn_from_mfn(new_mfn, gpfn)
- * - memory barrier
- * - atomic update of the p2m entry (xchg or cmpxchg the p2m entry)
- * get old_mfn entry as a result.
- * - memory barrier
- * - set_gpfn_from_mfn(old_mfn, INVALID_P2M_ENTRY)
- *
- * Here memory barrier can be achieved by release semantics.
- *
- * - races between global tlb purge and tlb insert
- * This is a race between reading/writing vcpu->arch.{d, i}tlb or VHPT entry.
- * When a vcpu is about to insert tlb, another vcpu may purge tlb
- * cache globally. Inserting tlb (vcpu_itc_no_srlz()) or global tlb purge
- * (domain_flush_vtlb_range() and domain_flush_vtlb_all()) can't update
- * vcpu->arch.{d, i}tlb, the VHPT and the mTLB as one atomic operation,
- * so there is a race here.
- *
- * Here we check the vcpu->arch.{d, i}tlb.p bit:
- * after inserting a tlb entry, check the p bit and retry the insert if it
- * was cleared. This means that when a global tlb purge and a tlb insert
- * are issued simultaneously, the global tlb purge effectively happens
- * after the tlb insert.
- *
- * - races between p2m entry update and tlb insert
- * This is a race between reading/writing the p2m entry.
- * reader: vcpu_itc_i(), vcpu_itc_d(), ia64_do_page_fault(), vcpu_fc()
- * writer: assign_domain_page_cmpxchg_rel(), replace_grant_host_mapping(),
- * steal_page(), zap_domain_page_one()
- *
- * For example, vcpu_itc_i() is about to insert tlb by calling
- * vcpu_itc_no_srlz() after reading the p2m entry.
- * At the same time, the p2m entry is replaced by xchg or cmpxchg and
- * tlb cache of the page is flushed.
- * There is a possibility that the p2m entry no longer points to the
- * old page while the tlb cache still points to the old page.
- * This can be detected, similarly to a sequence lock, using the p2m entry
- * itself: the reader remembers the read value of the p2m entry and inserts
- * the tlb entry, then reads the p2m entry again. If the new p2m entry
- * value differs from the remembered value, it retries (see the sketch
- * after this comment block).
- *
- * - races between referencing page and p2m entry update
- * This is a race between reading/writing the p2m entry.
- * reader: vcpu_get_domain_bundle(), vmx_get_domain_bundle(),
- * efi_emulate_get_time()
- * writer: assign_domain_page_cmpxchg_rel(), replace_grant_host_mapping(),
- * steal_page(), zap_domain_page_one()
- *
- * A page which is assigned to a domain can be de-assigned by another
- * vcpu, so before reading/writing a domain page, the page's reference
- * count must be incremented.
- * vcpu_get_domain_bundle(), vmx_get_domain_bundle() and
- * efi_emulate_get_time() follow this rule.
- *
- */
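-
-/*
- * Editorial sketch, not from the original file: a minimal user-space model
- * of the xchg update sequence and the seqlock-style reader revalidation
- * described above, using C11 atomics. The names (p2m, m2p, NENT) and sizes
- * are illustrative assumptions only; build it stand-alone with a C11
- * compiler.
- */
-#if 0
-#include <stdatomic.h>
-#include <stdio.h>
-
-#define NENT 4
-#define INVALID (~0UL)
-
-static _Atomic unsigned long p2m[NENT]; /* gpfn -> mfn */
-static _Atomic unsigned long m2p[NENT]; /* mfn  -> gpfn */
-
-/* writer: replace the mapping of gpfn by new_mfn (the "xchg case" above) */
-static void p2m_replace(unsigned long gpfn, unsigned long new_mfn)
-{
-    unsigned long old_mfn;
-
-    /* set the new m2p entry first */
-    atomic_store_explicit(&m2p[new_mfn], gpfn, memory_order_relaxed);
-    /* memory barrier + atomic update of the p2m entry: release semantics */
-    old_mfn = atomic_exchange_explicit(&p2m[gpfn], new_mfn,
-                                       memory_order_release);
-    /* memory barrier + invalidate the old m2p entry */
-    if (old_mfn != INVALID)
-        atomic_store_explicit(&m2p[old_mfn], INVALID, memory_order_release);
-}
-
-/* reader: remember the entry, use it, re-read and retry if it changed */
-static unsigned long p2m_lookup(unsigned long gpfn)
-{
-    unsigned long mfn, again;
-
-    do {
-        mfn = atomic_load_explicit(&p2m[gpfn], memory_order_acquire);
-        /* ... use mfn here, e.g. insert a tlb entry for it ... */
-        again = atomic_load_explicit(&p2m[gpfn], memory_order_acquire);
-    } while (mfn != again); /* the entry changed under us: retry */
-    return mfn;
-}
-
-int main(void)
-{
-    atomic_store(&p2m[1], INVALID);
-    atomic_store(&m2p[0], INVALID);
-    p2m_replace(1, 0);
-    printf("gpfn 1 -> mfn %lu\n", p2m_lookup(1));
-    return 0;
-}
-#endif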
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <xen/domain.h>
-#include <asm/xentypes.h>
-#include <xen/mm.h>
-#include <xen/errno.h>
-#include <asm/pgalloc.h>
-#include <asm/vhpt.h>
-#include <asm/vcpu.h>
-#include <asm/shadow.h>
-#include <asm/p2m_entry.h>
-#include <asm/tlb_track.h>
-#include <linux/efi.h>
-#include <linux/sort.h>
-#include <xen/grant_table.h>
-#include <xen/guest_access.h>
-#include <asm/page.h>
-#include <asm/dom_fw_common.h>
-#include <xsm/xsm.h>
-#include <public/memory.h>
-#include <asm/event.h>
-#include <asm/debugger.h>
-
-
-#define MEM_LOG(_f, _a...) gdprintk(XENLOG_WARNING, _f "\n", ## _a)
-
-static void domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
- volatile pte_t* ptep, pte_t old_pte,
- struct page_info* page);
-
-static void __xencomm_mark_dirty(struct domain *d,
- unsigned long addr, unsigned int len);
-
-extern unsigned long ia64_iobase;
-
-struct domain *dom_xen, *dom_io, *dom_cow;
-
-/*
- * This number is bigger than DOMID_SELF, DOMID_XEN and DOMID_IO.
- * If more reserved domain ids are introduced, this might be increased.
- */
-#define DOMID_P2M (0x7FF8U)
-static struct domain *dom_p2m;
-
-// the following is stolen from arch_init_memory() @ xen/arch/x86/mm.c
-void
-alloc_dom_xen_and_dom_io(void)
-{
- /*
- * Initialise our DOMID_XEN domain.
- * Any Xen-heap pages that we will allow to be mapped will have
- * their domain field set to dom_xen.
- */
- dom_xen = domain_create(DOMID_XEN, DOMCRF_dummy, 0);
- BUG_ON(dom_xen == NULL);
-
- /*
- * Initialise our DOMID_IO domain.
- * This domain owns I/O pages that are within the range of the page_info
- * array. Mappings occur at the privilege level of the caller.
- */
- dom_io = domain_create(DOMID_IO, DOMCRF_dummy, 0);
- BUG_ON(dom_io == NULL);
-
- /*
- * Initialise our DOMID_COW domain.
- * This domain owns sharable pages.
- */
- dom_cow = domain_create(DOMID_COW, DOMCRF_dummy, 0);
- BUG_ON(dom_cow == NULL);
-}
-
-static int
-mm_teardown_can_skip(struct domain* d, unsigned long offset)
-{
- return d->arch.mm_teardown_offset > offset;
-}
-
-static void
-mm_teardown_update_offset(struct domain* d, unsigned long offset)
-{
- d->arch.mm_teardown_offset = offset;
-}
-
-static void
-mm_teardown_pte(struct domain* d, volatile pte_t* pte, unsigned long offset)
-{
- pte_t old_pte;
- unsigned long mfn;
- struct page_info* page;
-
- old_pte = ptep_get_and_clear(&d->arch.mm, offset, pte);// acquire semantics
-
-    // vmx domains use bits[58:56] to distinguish io regions from memory.
-    // see vmx_build_physmap_table() in vmx_init.c
- if (!pte_mem(old_pte))
- return;
-
- // domain might map IO space or acpi table pages. check it.
- mfn = pte_pfn(old_pte);
- if (!mfn_valid(mfn))
- return;
- page = mfn_to_page(mfn);
- BUG_ON(page_get_owner(page) == NULL);
-
-    // A struct page_info corresponding to the mfn may or may not exist,
-    // depending on CONFIG_VIRTUAL_FRAME_TABLE.
-    // The above check is too simplistic; the right way is to check
-    // whether this page belongs to an I/O area or to ACPI pages.
-
- if (pte_pgc_allocated(old_pte)) {
- BUG_ON(page_get_owner(page) != d);
- BUG_ON(get_gpfn_from_mfn(mfn) == INVALID_M2P_ENTRY);
- set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
- if (test_and_clear_bit(_PGC_allocated, &page->count_info))
- put_page(page);
- } else {
- put_page(page);
- }
-}
-
-static int
-mm_teardown_pmd(struct domain* d, volatile pmd_t* pmd, unsigned long offset)
-{
- unsigned long i;
- volatile pte_t* pte = pte_offset_map(pmd, offset);
-
- for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
- unsigned long cur_offset = offset + (i << PAGE_SHIFT);
- if (mm_teardown_can_skip(d, cur_offset + PAGE_SIZE))
- continue;
- if (!pte_present(*pte)) { // acquire semantics
- mm_teardown_update_offset(d, cur_offset);
- continue;
- }
- mm_teardown_update_offset(d, cur_offset);
- mm_teardown_pte(d, pte, cur_offset);
- if (hypercall_preempt_check())
- return -EAGAIN;
- }
- return 0;
-}
-
-static int
-mm_teardown_pud(struct domain* d, volatile pud_t *pud, unsigned long offset)
-{
- unsigned long i;
- volatile pmd_t *pmd = pmd_offset(pud, offset);
-
- for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
- unsigned long cur_offset = offset + (i << PMD_SHIFT);
- if (mm_teardown_can_skip(d, cur_offset + PMD_SIZE))
- continue;
- if (!pmd_present(*pmd)) { // acquire semantics
- mm_teardown_update_offset(d, cur_offset);
- continue;
- }
- if (mm_teardown_pmd(d, pmd, cur_offset))
- return -EAGAIN;
- }
- return 0;
-}
-
-static int
-mm_teardown_pgd(struct domain* d, volatile pgd_t *pgd, unsigned long offset)
-{
- unsigned long i;
- volatile pud_t *pud = pud_offset(pgd, offset);
-
- for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
- unsigned long cur_offset = offset + (i << PUD_SHIFT);
-#ifndef __PAGETABLE_PUD_FOLDED
- if (mm_teardown_can_skip(d, cur_offset + PUD_SIZE))
- continue;
-#endif
- if (!pud_present(*pud)) { // acquire semantics
-#ifndef __PAGETABLE_PUD_FOLDED
- mm_teardown_update_offset(d, cur_offset);
-#endif
- continue;
- }
- if (mm_teardown_pud(d, pud, cur_offset))
- return -EAGAIN;
- }
- return 0;
-}
-
-int
-mm_teardown(struct domain* d)
-{
- struct mm_struct* mm = &d->arch.mm;
- unsigned long i;
- volatile pgd_t* pgd;
-
- if (mm->pgd == NULL)
- return 0;
-
- pgd = pgd_offset(mm, 0);
- for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
- unsigned long cur_offset = i << PGDIR_SHIFT;
-
- if (mm_teardown_can_skip(d, cur_offset + PGDIR_SIZE))
- continue;
- if (!pgd_present(*pgd)) { // acquire semantics
- mm_teardown_update_offset(d, cur_offset);
- continue;
- }
- if (mm_teardown_pgd(d, pgd, cur_offset))
- return -EAGAIN;
- }
-
- foreign_p2m_destroy(d);
- return 0;
-}
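-
-/*
- * Editorial sketch, not from the original file: the restartable-teardown
- * pattern used above (skip what was already done, record progress, return
- * -EAGAIN on preemption so the caller re-invokes), modelled stand-alone in
- * user space. All names and the preemption rule are illustrative only.
- */
-#if 0
-#include <stdio.h>
-
-#define NPAGES 8
-
-struct dom { unsigned long teardown_offset; };
-
-/* stand-in for hypercall_preempt_check(): yield after every third page */
-static int preempt(unsigned long done) { return (done % 3) == 0; }
-
-static int teardown(struct dom *d)
-{
-    unsigned long off;
-
-    for (off = 0; off < NPAGES; off++) {
-        if (off < d->teardown_offset)  /* cf. mm_teardown_can_skip() */
-            continue;
-        printf("tearing down page %lu\n", off);
-        d->teardown_offset = off + 1;  /* cf. mm_teardown_update_offset() */
-        if (preempt(off + 1))
-            return -1;                 /* cf. returning -EAGAIN */
-    }
-    return 0;
-}
-
-int main(void)
-{
-    struct dom d = { 0 };
-    while (teardown(&d) != 0)
-        ;                              /* continuation: simply call again */
-    return 0;
-}
-#endif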
-
-static void
-mm_p2m_teardown_pmd(struct domain* d, volatile pmd_t* pmd,
- unsigned long offset)
-{
- pte_free_kernel(pte_offset_map(pmd, offset));
-}
-
-static void
-mm_p2m_teardown_pud(struct domain* d, volatile pud_t *pud,
- unsigned long offset)
-{
- unsigned long i;
- volatile pmd_t *pmd = pmd_offset(pud, offset);
-
- for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
- if (!pmd_present(*pmd))
- continue;
- mm_p2m_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
- }
- pmd_free(pmd_offset(pud, offset));
-}
-
-static void
-mm_p2m_teardown_pgd(struct domain* d, volatile pgd_t *pgd,
- unsigned long offset)
-{
- unsigned long i;
- volatile pud_t *pud = pud_offset(pgd, offset);
-
- for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
- if (!pud_present(*pud))
- continue;
- mm_p2m_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
- }
- pud_free(pud_offset(pgd, offset));
-}
-
-static void
-mm_p2m_teardown(struct domain* d)
-{
- struct mm_struct* mm = &d->arch.mm;
- unsigned long i;
- volatile pgd_t* pgd;
-
- BUG_ON(mm->pgd == NULL);
- pgd = pgd_offset(mm, 0);
- for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
- if (!pgd_present(*pgd))
- continue;
- mm_p2m_teardown_pgd(d, pgd, i << PGDIR_SHIFT);
- }
- pgd_free(mm->pgd);
- mm->pgd = NULL;
-}
-
-void
-mm_final_teardown(struct domain* d)
-{
- if (d->arch.shadow_bitmap != NULL) {
- xfree(d->arch.shadow_bitmap);
- d->arch.shadow_bitmap = NULL;
- }
- mm_p2m_teardown(d);
-}
-
-unsigned long
-domain_get_maximum_gpfn(struct domain *d)
-{
- return (d->arch.convmem_end - 1) >> PAGE_SHIFT;
-}
-
-// stolen from share_xen_page_with_guest() in xen/arch/x86/mm.c
-void
-share_xen_page_with_guest(struct page_info *page,
- struct domain *d, int readonly)
-{
- if ( page_get_owner(page) == d )
- return;
-
-#if 1
- if (readonly) {
- printk("%s:%d readonly is not supported yet\n", __func__, __LINE__);
- }
-#endif
-
- // alloc_xenheap_pages() doesn't initialize page owner.
- //BUG_ON(page_get_owner(page) != NULL);
-
- spin_lock(&d->page_alloc_lock);
-
-#ifndef __ia64__
- /* The incremented type count pins as writable or read-only. */
- page->u.inuse.type_info = (readonly ? PGT_none : PGT_writable_page);
- page->u.inuse.type_info |= PGT_validated | 1;
-#endif
-
- page_set_owner(page, d);
- wmb(); /* install valid domain ptr before updating refcnt. */
- ASSERT((page->count_info & ~PGC_xen_heap)== 0);
-
- /* Only add to the allocation list if the domain isn't dying. */
- if ( !d->is_dying )
- {
- page->count_info |= PGC_allocated | 1;
- if ( unlikely(d->xenheap_pages++ == 0) )
- get_knownalive_domain(d);
- page_list_add_tail(page, &d->xenpage_list);
- }
-
-    // grant_table_destroy() releases these pages,
-    // but it doesn't clear their m2p entries, so stale entries might
-    // remain; such a stale entry is cleared here.
- set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
-
- spin_unlock(&d->page_alloc_lock);
-}
-
-void
-share_xen_page_with_privileged_guests(struct page_info *page, int readonly)
-{
- share_xen_page_with_guest(page, dom_xen, readonly);
-}
-
-unsigned long
-gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
-{
- unsigned long pte;
-
- pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT, NULL);
- if (!pte) {
- panic("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
- }
-
- if ((pte & _PAGE_IO) && is_hvm_domain(d))
- return INVALID_MFN;
-
- return ((pte & _PFN_MASK) >> PAGE_SHIFT);
-}
-
-// given a domain virtual address, pte and page size, extract the metaphysical
-// address, convert the pte to a physical address for the (possibly different)
-// Xen PAGE_SIZE and return the modified pte. (NOTE: TLB insert should use
-// current->arch.vhpt_pg_shift!)
-u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* itir,
- struct p2m_entry* entry)
-{
- struct domain *d = current->domain;
- ia64_itir_t _itir = {.itir = itir__};
- u64 mask, mpaddr, pteval2;
- u64 arflags;
- u64 arflags2;
- u64 maflags2;
-
- pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
-
- // FIXME address had better be pre-validated on insert
- mask = ~itir_mask(_itir.itir);
- mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
-
- if (_itir.ps > PAGE_SHIFT)
- _itir.ps = PAGE_SHIFT;
-
- ((ia64_itir_t*)itir)->itir = _itir.itir;/* Copy the whole register. */
- ((ia64_itir_t*)itir)->ps = _itir.ps; /* Overwrite ps part! */
-
- pteval2 = lookup_domain_mpa(d, mpaddr, entry);
- if (_itir.ps < PAGE_SHIFT)
- pteval2 |= mpaddr & ~PAGE_MASK & ~((1L << _itir.ps) - 1);
-
- /* Check access rights. */
- arflags = pteval & _PAGE_AR_MASK;
- arflags2 = pteval2 & _PAGE_AR_MASK;
- if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
-#if 0
- dprintk(XENLOG_WARNING,
- "%s:%d "
- "pteval 0x%lx arflag 0x%lx address 0x%lx itir 0x%lx "
- "pteval2 0x%lx arflags2 0x%lx mpaddr 0x%lx\n",
- __func__, __LINE__,
- pteval, arflags, address, itir__,
- pteval2, arflags2, mpaddr);
-#endif
- pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
- }
-
- /* Check memory attribute. The switch is on the *requested* memory
- attribute. */
- maflags2 = pteval2 & _PAGE_MA_MASK;
- switch (pteval & _PAGE_MA_MASK) {
- case _PAGE_MA_NAT:
- /* NaT pages are always accepted! */
- break;
- case _PAGE_MA_UC:
- case _PAGE_MA_UCE:
- case _PAGE_MA_WC:
- if (maflags2 == _PAGE_MA_WB) {
- /* Don't let domains WB-map uncached addresses.
- This can happen when domU tries to touch i/o
- port space. Also prevents possible address
- aliasing issues. */
- if (!(mpaddr - IO_PORTS_PADDR < IO_PORTS_SIZE)) {
- u64 ucwb;
-
- /*
- * If dom0 page has both UC & WB attributes
- * don't warn about attempted UC access.
- */
- ucwb = efi_mem_attribute(mpaddr, PAGE_SIZE);
- ucwb &= EFI_MEMORY_UC | EFI_MEMORY_WB;
- ucwb ^= EFI_MEMORY_UC | EFI_MEMORY_WB;
-
- if (d != dom0 || ucwb != 0)
- gdprintk(XENLOG_WARNING, "Warning: UC"
- " to WB for mpaddr=%lx\n",
- mpaddr);
- }
- pteval = (pteval & ~_PAGE_MA_MASK) | _PAGE_MA_WB;
- }
- break;
- case _PAGE_MA_WB:
- if (maflags2 != _PAGE_MA_WB) {
- /* Forbid non-coherent access to coherent memory. */
- panic_domain(NULL, "try to use WB mem attr on "
- "UC page, mpaddr=%lx\n", mpaddr);
- }
- break;
- default:
- panic_domain(NULL, "try to use unknown mem attribute\n");
- }
-
- /* If shadow mode is enabled, virtualize dirty bit. */
- if (shadow_mode_enabled(d) && (pteval & _PAGE_D)) {
- u64 mp_page = mpaddr >> PAGE_SHIFT;
- pteval |= _PAGE_VIRT_D;
-
- /* If the page is not already dirty, don't set the dirty bit! */
- if (mp_page < d->arch.shadow_bitmap_size * 8
- && !test_bit(mp_page, d->arch.shadow_bitmap))
- pteval &= ~_PAGE_D;
- }
-
- /* Ignore non-addr bits of pteval2 and force PL0->1
- (PL3 is unaffected) */
- return (pteval & ~(_PAGE_PPN_MASK | _PAGE_PL_MASK)) |
- (pteval2 & _PAGE_PPN_MASK) |
- (vcpu_pl_adjust(pteval, 7) & _PAGE_PL_MASK);
-}
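-
-/*
- * Editorial sketch, not from the original file: how the metaphysical
- * address is formed at the top of translate_domain_pte() from the ppn bits
- * of the guest pte and the faulting address, using the itir page size.
- * The values below are illustrative only.
- */
-#if 0
-#include <stdio.h>
-
-int main(void)
-{
-    unsigned long ps = 16;                /* guest mapping: a 64KB page */
-    unsigned long pte_ppn = 0x12340000UL; /* ppn bits of the guest pte */
-    unsigned long address = 0x4000a468UL; /* faulting guest address */
-    unsigned long mask = (1UL << ps) - 1; /* cf. ~itir_mask(itir) */
-
-    /* page part comes from the pte, offset-in-page from the address */
-    unsigned long mpaddr = (pte_ppn & ~mask) | (address & mask);
-
-    printf("mpaddr = 0x%lx\n", mpaddr);   /* prints 0x1234a468 */
-    return 0;
-}
-#endif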
-
-// given a current domain metaphysical address, return the physical address
-unsigned long translate_domain_mpaddr(unsigned long mpaddr,
- struct p2m_entry* entry)
-{
- unsigned long pteval;
-
- pteval = lookup_domain_mpa(current->domain, mpaddr, entry);
- return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
-}
-
-//XXX !xxx_present() should be used instead of !xxx_none()?
-// pud, pmd and pte pages are zero-cleared when they are allocated.
-// Their contents must be visible before population, so the cmpxchg
-// must have release semantics. (A user-space sketch of this pattern
-// follows the function below.)
-static volatile pte_t*
-lookup_alloc_domain_pte(struct domain* d, unsigned long mpaddr)
-{
- struct mm_struct *mm = &d->arch.mm;
- volatile pgd_t *pgd;
- volatile pud_t *pud;
- volatile pmd_t *pmd;
-
- BUG_ON(mm->pgd == NULL);
-
- pgd = pgd_offset(mm, mpaddr);
- again_pgd:
- if (unlikely(pgd_none(*pgd))) { // acquire semantics
- pud_t *old_pud = NULL;
- pud = pud_alloc_one(mm, mpaddr);
- if (unlikely(!pgd_cmpxchg_rel(mm, pgd, old_pud, pud))) {
- pud_free(pud);
- goto again_pgd;
- }
- }
-
- pud = pud_offset(pgd, mpaddr);
- again_pud:
- if (unlikely(pud_none(*pud))) { // acquire semantics
- pmd_t* old_pmd = NULL;
- pmd = pmd_alloc_one(mm, mpaddr);
- if (unlikely(!pud_cmpxchg_rel(mm, pud, old_pmd, pmd))) {
- pmd_free(pmd);
- goto again_pud;
- }
- }
-
- pmd = pmd_offset(pud, mpaddr);
- again_pmd:
- if (unlikely(pmd_none(*pmd))) { // acquire semantics
- pte_t* old_pte = NULL;
- pte_t* pte = pte_alloc_one_kernel(mm, mpaddr);
- if (unlikely(!pmd_cmpxchg_kernel_rel(mm, pmd, old_pte, pte))) {
- pte_free_kernel(pte);
- goto again_pmd;
- }
- }
-
- return pte_offset_map(pmd, mpaddr);
-}
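-
-/*
- * Editorial sketch, not from the original file: the allocate / publish with
- * cmpxchg / free-on-race-loss pattern used by lookup_alloc_domain_pte(),
- * modelled with C11 atomics on a single slot. Names are illustrative only.
- */
-#if 0
-#include <stdatomic.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-struct table { int entries[4]; };
-
-static struct table *_Atomic slot; /* e.g. one pgd/pud/pmd slot */
-
-static struct table *lookup_alloc(void)
-{
-    struct table *t = atomic_load_explicit(&slot, memory_order_acquire);
-
-    while (t == NULL) {
-        /* zero-clear first, then publish with release semantics so the
-         * zeroed contents are visible before the table becomes reachable */
-        struct table *fresh = calloc(1, sizeof(*fresh));
-
-        if (fresh == NULL)
-            return NULL;
-        if (atomic_compare_exchange_strong_explicit(
-                &slot, &t, fresh,
-                memory_order_acq_rel, memory_order_acquire))
-            return fresh;
-        /* lost the race: another CPU populated the slot first; free ours
-         * and use the winner's table, now loaded into t */
-        free(fresh);
-    }
-    return t;
-}
-
-int main(void)
-{
-    printf("table at %p\n", (void *)lookup_alloc());
-    return 0;
-}
-#endif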
-
-//XXX xxx_none() should be used instead of !xxx_present()?
-volatile pte_t*
-lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr)
-{
- struct mm_struct *mm = &d->arch.mm;
- volatile pgd_t *pgd;
- volatile pud_t *pud;
- volatile pmd_t *pmd;
-
- BUG_ON(mm->pgd == NULL);
- pgd = pgd_offset(mm, mpaddr);
- if (unlikely(!pgd_present(*pgd))) // acquire semantics
- return NULL;
-
- pud = pud_offset(pgd, mpaddr);
- if (unlikely(!pud_present(*pud))) // acquire semantics
- return NULL;
-
- pmd = pmd_offset(pud, mpaddr);
- if (unlikely(!pmd_present(*pmd))) // acquire semantics
- return NULL;
-
- return pte_offset_map(pmd, mpaddr);
-}
-
-static volatile pte_t*
-lookup_noalloc_domain_pte_none(struct domain* d, unsigned long mpaddr)
-{
- struct mm_struct *mm = &d->arch.mm;
- volatile pgd_t *pgd;
- volatile pud_t *pud;
- volatile pmd_t *pmd;
-
- BUG_ON(mm->pgd == NULL);
- pgd = pgd_offset(mm, mpaddr);
- if (unlikely(pgd_none(*pgd))) // acquire semantics
- return NULL;
-
- pud = pud_offset(pgd, mpaddr);
- if (unlikely(pud_none(*pud))) // acquire semantics
- return NULL;
-
- pmd = pmd_offset(pud, mpaddr);
- if (unlikely(pmd_none(*pmd))) // acquire semantics
- return NULL;
-
- return pte_offset_map(pmd, mpaddr);
-}
-
-unsigned long
-____lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
-{
- volatile pte_t *pte;
-
- pte = lookup_noalloc_domain_pte(d, mpaddr);
- if (pte == NULL)
- return INVALID_MFN;
-
- if (pte_present(*pte))
- return (pte->pte & _PFN_MASK);
- return INVALID_MFN;
-}
-
-unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr,
- struct p2m_entry* entry)
-{
- volatile pte_t *pte = lookup_noalloc_domain_pte(d, mpaddr);
-
- if (pte != NULL) {
- pte_t tmp_pte = *pte;// pte is volatile. copy the value.
- if (pte_present(tmp_pte)) {
- if (entry != NULL)
- p2m_entry_set(entry, pte, tmp_pte);
- return pte_val(tmp_pte);
- } else if (is_hvm_domain(d))
- return INVALID_MFN;
- }
-
- if (mpaddr < d->arch.convmem_end && !d->is_dying) {
- gdprintk(XENLOG_WARNING, "vcpu %d iip 0x%016lx: non-allocated mpa "
- "d %"PRId16" 0x%lx (< 0x%lx)\n",
- current->vcpu_id, PSCB(current, iip),
- d->domain_id, mpaddr, d->arch.convmem_end);
- } else if (mpaddr - IO_PORTS_PADDR < IO_PORTS_SIZE) {
- /* Log I/O port probing, but complain less loudly about it */
- gdprintk(XENLOG_INFO, "vcpu %d iip 0x%016lx: bad I/O port access "
- "d %"PRId16" 0x%lx\n",
- current->vcpu_id, PSCB(current, iip), d->domain_id,
- IO_SPACE_SPARSE_DECODING(mpaddr - IO_PORTS_PADDR));
- } else {
- gdprintk(XENLOG_WARNING, "vcpu %d iip 0x%016lx: bad mpa "
- "d %"PRId16" 0x%lx (=> 0x%lx)\n",
- current->vcpu_id, PSCB(current, iip),
- d->domain_id, mpaddr, d->arch.convmem_end);
- }
-
- debugger_event (XEN_IA64_DEBUG_ON_BAD_MPA);
-
- if (entry != NULL)
- p2m_entry_set(entry, NULL, __pte(0));
-    //XXX This is a workaround until emulation of memory accesses to
-    // regions where no memory or device is attached is implemented.
- return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_PRIV |
- _PAGE_AR_RWX)));
-}
-
-// FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
-#if 1
-void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
-{
- unsigned long pte = lookup_domain_mpa(d, mpaddr, NULL);
- unsigned long imva;
-
- pte &= _PAGE_PPN_MASK;
- imva = (unsigned long) __va(pte);
- imva |= mpaddr & ~PAGE_MASK;
- return (void*)imva;
-}
-#else
-void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
-{
- unsigned long imva = __gpa_to_mpa(d, mpaddr);
-
- return (void *)__va(imva);
-}
-#endif
-
-unsigned long
-paddr_to_maddr(unsigned long paddr)
-{
- struct vcpu *v = current;
- struct domain *d = v->domain;
- u64 pa;
-
- pa = ____lookup_domain_mpa(d, paddr);
- if (pa == INVALID_MFN) {
- printk("%s: called with bad memory address: 0x%lx - iip=%lx\n",
- __func__, paddr, vcpu_regs(v)->cr_iip);
- return 0;
- }
- return (pa & _PFN_MASK) | (paddr & ~PAGE_MASK);
-}
-
-/* Allocate a new page for domain and map it to the specified metaphysical
- address. */
-static struct page_info *
-__assign_new_domain_page(struct domain *d, unsigned long mpaddr,
- volatile pte_t* pte)
-{
- struct page_info *p;
- unsigned long maddr;
-
- BUG_ON(!pte_none(*pte));
-
- p = alloc_domheap_page(d, 0);
- if (unlikely(!p)) {
- printk("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
- return(p);
- }
-
- // zero out pages for security reasons
- clear_page(page_to_virt(p));
- maddr = page_to_maddr (p);
- if (unlikely(maddr > __get_cpu_var(vhpt_paddr)
- && maddr < __get_cpu_var(vhpt_pend))) {
- /* FIXME: how can this happen ?
- vhpt is allocated by alloc_domheap_page. */
- printk("assign_new_domain_page: reassigned vhpt page %lx!!\n",
- maddr);
- }
-
- set_gpfn_from_mfn(page_to_mfn(p), mpaddr >> PAGE_SHIFT);
- // clear_page() and set_gpfn_from_mfn() become visible before set_pte_rel()
- // because set_pte_rel() has release semantics
- set_pte_rel(pte,
- pfn_pte(maddr >> PAGE_SHIFT,
- __pgprot(_PAGE_PGC_ALLOCATED | __DIRTY_BITS |
- _PAGE_PL_PRIV | _PAGE_AR_RWX)));
-
- smp_mb();
- return p;
-}
-
-struct page_info *
-assign_new_domain_page(struct domain *d, unsigned long mpaddr)
-{
- volatile pte_t *pte = lookup_alloc_domain_pte(d, mpaddr);
-
- if (!pte_none(*pte))
- return NULL;
-
- return __assign_new_domain_page(d, mpaddr, pte);
-}
-
-void __init
-assign_new_domain0_page(struct domain *d, unsigned long mpaddr)
-{
- volatile pte_t *pte;
-
- BUG_ON(d != dom0);
- pte = lookup_alloc_domain_pte(d, mpaddr);
- if (pte_none(*pte)) {
- struct page_info *p = __assign_new_domain_page(d, mpaddr, pte);
- if (p == NULL) {
- panic("%s: can't allocate page for dom0\n", __func__);
- }
- }
-}
-
-static unsigned long
-flags_to_prot (unsigned long flags)
-{
- unsigned long res = _PAGE_PL_PRIV | __DIRTY_BITS;
-
- res |= flags & ASSIGN_readonly ? _PAGE_AR_R: _PAGE_AR_RWX;
- res |= flags & ASSIGN_nocache ? _PAGE_MA_UC: _PAGE_MA_WB;
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
- res |= flags & ASSIGN_tlb_track ? _PAGE_TLB_TRACKING: 0;
-#endif
- res |= flags & ASSIGN_pgc_allocated ? _PAGE_PGC_ALLOCATED: 0;
- res |= flags & ASSIGN_io ? _PAGE_IO: 0;
-
- return res;
-}
-
-/* map a physical address to the specified metaphysical addr */
-// flags: currently only ASSIGN_readonly, ASSIGN_nocache, ASSIGN_tlb_track
-// This is called by assign_domain_mmio_page(),
-// so access to the pte is racy.
-int
-__assign_domain_page(struct domain *d,
- unsigned long mpaddr, unsigned long physaddr,
- unsigned long flags)
-{
- volatile pte_t *pte;
- pte_t old_pte;
- pte_t new_pte;
- pte_t ret_pte;
- unsigned long prot = flags_to_prot(flags);
-
- pte = lookup_alloc_domain_pte(d, mpaddr);
-
- old_pte = __pte(0);
- new_pte = pfn_pte(physaddr >> PAGE_SHIFT, __pgprot(prot));
- again_hvm_page_io:
- ret_pte = ptep_cmpxchg_rel(&d->arch.mm, mpaddr, pte, old_pte, new_pte);
- if (pte_val(ret_pte) == pte_val(old_pte)) {
- smp_mb();
- return 0;
- }
- /* in HVM guest, when VTD is enabled,
- * P2M entry may change from _PAGE_IO type to real MMIO page
- */
- if(is_hvm_domain(d) && (pte_val(ret_pte) & _PAGE_IO) &&
- !mfn_valid(physaddr >> PAGE_SHIFT)) {
- old_pte = ret_pte;
- goto again_hvm_page_io;
- }
-
-    // dom0 tried to map a real machine I/O region, but failed.
-    // It is very likely that dom0 won't boot correctly because
-    // it can't access I/O, so complain here.
- if (flags & ASSIGN_nocache) {
- int warn = 0;
-
- if (pte_pfn(ret_pte) != (physaddr >> PAGE_SHIFT))
- warn = 1;
- else if (!(pte_val(ret_pte) & _PAGE_MA_UC)) {
- u32 type;
- u64 attr;
-
- warn = 1;
-
- /*
- * See
- * complete_dom0_memmap()
- * case EFI_RUNTIME_SERVICES_CODE:
- * case EFI_RUNTIME_SERVICES_DATA:
- * case EFI_ACPI_RECLAIM_MEMORY:
- * case EFI_ACPI_MEMORY_NVS:
- * case EFI_RESERVED_TYPE:
- *
- * Currently only EFI_RUNTIME_SERVICES_CODE is found
- * so that we suppress only EFI_RUNTIME_SERVICES_CODE case.
- */
- type = efi_mem_type(physaddr);
- attr = efi_mem_attributes(physaddr);
- if (type == EFI_RUNTIME_SERVICES_CODE &&
- (attr & EFI_MEMORY_UC) && (attr & EFI_MEMORY_WB))
- warn = 0;
- }
- if (warn)
- printk("%s:%d WARNING can't assign page domain 0x%p id %d\n"
- "\talready assigned pte_val 0x%016lx\n"
- "\tmpaddr 0x%016lx physaddr 0x%016lx flags 0x%lx\n",
- __func__, __LINE__,
- d, d->domain_id, pte_val(ret_pte),
- mpaddr, physaddr, flags);
- }
-
- return -EAGAIN;
-}
-
-/* get_page() and map a physical address to the specified metaphysical addr */
-void
-assign_domain_page(struct domain *d,
- unsigned long mpaddr, unsigned long physaddr)
-{
- struct page_info* page = mfn_to_page(physaddr >> PAGE_SHIFT);
-
- BUG_ON((physaddr & _PAGE_PPN_MASK) != physaddr);
- BUG_ON((page->count_info & ~PGC_xen_heap) != (PGC_allocated | 1));
- set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
- // because __assign_domain_page() uses set_pte_rel() which has
- // release semantics, smp_mb() isn't needed.
- (void)__assign_domain_page(d, mpaddr, physaddr,
- ASSIGN_writable | ASSIGN_pgc_allocated);
-}
-
-static void
-ioports_get_mmio_addr(const struct io_space *space,
- unsigned long fp, unsigned long lp,
- unsigned long *mmio_start, unsigned long *mmio_end)
-{
- if (space->sparse) {
- *mmio_start = IO_SPACE_SPARSE_ENCODING(fp) & PAGE_MASK;
- *mmio_end = PAGE_ALIGN(IO_SPACE_SPARSE_ENCODING(lp));
- } else {
- *mmio_start = fp & PAGE_MASK;
- *mmio_end = PAGE_ALIGN(lp);
- }
-}
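-
-/*
- * Editorial sketch, not from the original file: round-tripping the sparse
- * I/O space encoding used above. The two macros below are assumed to match
- * the usual ia64 IO_SPACE_SPARSE_ENCODING()/IO_SPACE_SPARSE_DECODING()
- * definitions; this is an assumption, not a quote of the Xen headers.
- */
-#if 0
-#include <stdio.h>
-
-#define SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff))
-#define SPARSE_DECODING(o) ((((o) >> 12) << 2) | ((o) & 0x3))
-
-int main(void)
-{
-    unsigned int port;
-
-    /* each naturally aligned group of 4 ports lands in its own 4KB MMIO
-     * page, so access can be granted with page granularity per 4 ports */
-    for (port = 0x60; port <= 0x64; port += 2) {
-        unsigned long off = SPARSE_ENCODING(port);
-        printf("port 0x%x -> offset 0x%lx -> port 0x%x\n",
-               port, off, (unsigned int)SPARSE_DECODING(off));
-    }
-    return 0;
-}
-#endif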
-
-static unsigned long
-ioports_get_mmio_base(const struct io_space *space, struct domain *d)
-{
- if (VMX_DOMAIN(d->vcpu[0]))
- return LEGACY_IO_START;
-
- if (space == &io_space[0] && d != dom0)
- return IO_PORTS_PADDR;
-
- return __pa(space->mmio_base);
-}
-
-/*
- * Input:
- * fgp: first guest port
- * fmp: first machine port
- * lmp: last machine port
- */
-int
-ioports_permit_access(struct domain *d, unsigned int fgp,
- unsigned int fmp, unsigned int lmp)
-{
- struct io_space *space;
- unsigned long mmio_start, mach_start, mach_end;
- int ret;
-
- if (IO_SPACE_NR(fmp) >= num_io_spaces) {
- dprintk(XENLOG_WARNING, "Unknown I/O Port range 0x%x - 0x%x\n", fmp, lmp);
- return -EFAULT;
- }
-
- /*
- * The ioport_cap rangeset tracks the I/O port address including
- * the port space ID. This means port space IDs need to match
- * between Xen and dom0. This is also a requirement because
- * the hypercall to pass these port ranges only uses a u32.
- *
- * NB - non-dom0 driver domains may only have a subset of the
- * I/O port spaces and thus will number port spaces differently.
- * This is ok, they don't make use of this interface.
- */
- ret = rangeset_add_range(d->arch.ioport_caps, fmp, lmp);
- if (ret != 0)
- return ret;
-
- space = &io_space[IO_SPACE_NR(fmp)];
-
- /* Legacy I/O on dom0 is already setup */
- if (d == dom0 && space == &io_space[0])
- return 0;
-
- fmp = IO_SPACE_PORT(fmp);
- lmp = IO_SPACE_PORT(lmp);
-
- ioports_get_mmio_addr(space, fmp, lmp, &mach_start, &mach_end);
-
- /*
- * The "machine first port" is not necessarily identity mapped
- * to the guest first port. At least for the legacy range.
- */
- mach_start = mach_start | __pa(space->mmio_base);
- mach_end = mach_end | __pa(space->mmio_base);
-
- mmio_start = IO_SPACE_SPARSE_ENCODING(fgp) & PAGE_MASK;
- mmio_start |= ioports_get_mmio_base(space, d);
-
- while (mach_start < mach_end) {
- (void)__assign_domain_page(d, mmio_start, mach_start, ASSIGN_nocache);
- mmio_start += PAGE_SIZE;
- mach_start += PAGE_SIZE;
- }
-
- return 0;
-}
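-/*
- * Usage sketch (illustrative; the port numbers are hypothetical): to
- * let a driver domain reach its serial port in the legacy space, the
- * toolstack path would end up calling
- *   ioports_permit_access(d, 0x3f8, 0x3f8, 0x3ff);
- * which records [0x3f8, 0x3ff] in d->arch.ioport_caps and maps the
- * covering MMIO page(s) uncacheable at the guest's I/O port base.
- */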
-
-static int
-ioports_has_allowed(struct domain *d, unsigned int fp, unsigned int lp)
-{
- for (; fp < lp; fp++)
- if (rangeset_contains_singleton(d->arch.ioport_caps, fp))
- return 1;
-
- return 0;
-}
-
-int
-ioports_deny_access(struct domain *d, unsigned int fp, unsigned int lp)
-{
- int ret;
- struct mm_struct *mm = &d->arch.mm;
- unsigned long mmio_start, mmio_end, mmio_base;
- unsigned int fp_base, lp_base;
- struct io_space *space;
-
- if (IO_SPACE_NR(fp) >= num_io_spaces) {
- dprintk(XENLOG_WARNING, "Unknown I/O Port range 0x%x - 0x%x\n", fp, lp);
- return -EFAULT;
- }
-
- ret = rangeset_remove_range(d->arch.ioport_caps, fp, lp);
- if (ret != 0)
- return ret;
-
- space = &io_space[IO_SPACE_NR(fp)];
- fp_base = IO_SPACE_PORT(fp);
- lp_base = IO_SPACE_PORT(lp);
-
- ioports_get_mmio_addr(space, fp_base, lp_base, &mmio_start, &mmio_end);
-
- mmio_base = ioports_get_mmio_base(space, d);
-
- for (; mmio_start < mmio_end; mmio_start += PAGE_SIZE) {
- unsigned int port, range;
- unsigned long mpaddr;
- volatile pte_t *pte;
- pte_t old_pte;
-
- if (space->sparse) {
- port = IO_SPACE_SPARSE_DECODING(mmio_start);
- range = IO_SPACE_SPARSE_PORTS_PER_PAGE - 1;
- } else {
- port = mmio_start;
- range = PAGE_SIZE - 1;
- }
-
- port |= IO_SPACE_BASE(IO_SPACE_NR(fp));
-
- if (port < fp || port + range > lp) {
- /* Maybe this covers an allowed port. */
- if (ioports_has_allowed(d, port, port + range))
- continue;
- }
-
- mpaddr = mmio_start | mmio_base;
- pte = lookup_noalloc_domain_pte_none(d, mpaddr);
- BUG_ON(pte == NULL);
- BUG_ON(pte_none(*pte));
-
- /* clear pte */
- old_pte = ptep_get_and_clear(mm, mpaddr, pte);
- }
- domain_flush_vtlb_all(d);
- return 0;
-}
-
-static void
-assign_domain_same_page(struct domain *d,
- unsigned long mpaddr, unsigned long size,
- unsigned long flags)
-{
- //XXX optimization
- unsigned long end = PAGE_ALIGN(mpaddr + size);
- for (mpaddr &= PAGE_MASK; mpaddr < end; mpaddr += PAGE_SIZE) {
- (void)__assign_domain_page(d, mpaddr, mpaddr, flags);
- }
-}
-
-int
-efi_mmio(unsigned long physaddr, unsigned long size)
-{
- void *efi_map_start, *efi_map_end;
- u64 efi_desc_size;
- void* p;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- efi_memory_desc_t* md = (efi_memory_desc_t *)p;
- unsigned long start = md->phys_addr;
- unsigned long end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
- if (start <= physaddr && physaddr < end) {
- if ((physaddr + size) > end) {
- gdprintk(XENLOG_INFO, "%s: physaddr 0x%lx size = 0x%lx\n",
- __func__, physaddr, size);
- return 0;
- }
-
- // for io space
- if (md->type == EFI_MEMORY_MAPPED_IO ||
- md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
- return 1;
- }
-
- // for runtime
- // see efi_enter_virtual_mode(void)
- // in linux/arch/ia64/kernel/efi.c
- if ((md->attribute & EFI_MEMORY_RUNTIME) &&
- !(md->attribute & EFI_MEMORY_WB)) {
- return 1;
- }
-
- return 0;
- }
-
- if (physaddr < start) {
- break;
- }
- }
-
- return 1;
-}
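-/*
- * Note (added for clarity): efi_mmio(addr, size) returns 1 only when
- * the whole [addr, addr + size) range fits in one MMIO descriptor
- * (EFI_MEMORY_MAPPED_IO{,_PORT_SPACE}), in an uncacheable runtime
- * descriptor, or in a hole not covered by the memmap at all; a range
- * inside ordinary WB RAM yields 0, so the MMIO assign/deassign paths
- * below reject it with -EINVAL.
- */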
-
-unsigned long
-assign_domain_mmio_page(struct domain *d, unsigned long mpaddr,
- unsigned long phys_addr, unsigned long size,
- unsigned long flags)
-{
- unsigned long addr = mpaddr & PAGE_MASK;
- unsigned long end = PAGE_ALIGN(mpaddr + size);
-
- if (size == 0) {
- gdprintk(XENLOG_INFO, "%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
- __func__, d, mpaddr, size);
- }
- if (!efi_mmio(phys_addr, size)) {
-#ifndef NDEBUG
- gdprintk(XENLOG_INFO, "%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
- __func__, d, mpaddr, size);
-#endif
- return -EINVAL;
- }
-
- for (phys_addr &= PAGE_MASK; addr < end;
- addr += PAGE_SIZE, phys_addr += PAGE_SIZE) {
- __assign_domain_page(d, addr, phys_addr, flags);
- }
-
- return mpaddr;
-}
-
-unsigned long
-assign_domain_mach_page(struct domain *d,
- unsigned long mpaddr, unsigned long size,
- unsigned long flags)
-{
- BUG_ON(flags & ASSIGN_pgc_allocated);
- assign_domain_same_page(d, mpaddr, size, flags);
- return mpaddr;
-}
-
-static void
-adjust_page_count_info(struct page_info* page)
-{
- struct domain* d = page_get_owner(page);
- BUG_ON((page->count_info & PGC_count_mask) < 1);
- if (d != NULL) {
- int ret = get_page(page, d);
- BUG_ON(ret == 0);
- } else {
- unsigned long x, nx, y;
-
- y = page->count_info;
- do {
- x = y;
- nx = x + 1;
-
- BUG_ON((x >> 32) != 0);
- BUG_ON((nx & PGC_count_mask) != 2);
- y = cmpxchg(&page->count_info, x, nx);
- } while (unlikely(y != x));
- BUG_ON(page_get_owner(page) != NULL);
- }
-}
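-/*
- * The loop above is the usual lock-free read-modify-write idiom.
- * A minimal sketch of the pattern (illustrative only):
- *
- *   unsigned long x, y = page->count_info;
- *   do {
- *       x = y;                                     // snapshot
- *       y = cmpxchg(&page->count_info, x, x + 1);  // try to publish x + 1
- *   } while (y != x);                              // raced: retry with new value
- */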
-
-static void
-domain_put_page(struct domain* d, unsigned long mpaddr,
- volatile pte_t* ptep, pte_t old_pte, int clear_PGC_allocate)
-{
- unsigned long mfn = pte_pfn(old_pte);
- struct page_info* page = mfn_to_page(mfn);
-
- if (pte_pgc_allocated(old_pte)) {
- if (page_get_owner(page) == d || page_get_owner(page) == NULL) {
- BUG_ON(get_gpfn_from_mfn(mfn) != (mpaddr >> PAGE_SHIFT));
- set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
- } else {
- BUG();
- }
-
- if (likely(clear_PGC_allocate)) {
- if (!test_and_clear_bit(_PGC_allocated, &page->count_info))
- BUG();
- /* put_page() is done by domain_page_flush_and_put() */
- } else {
- // In this case, the page reference count mustn't be touched.
- // domain_page_flush_and_put() decrements it, so we increment
- // it in advance. This path is the slow path.
- //
- // guest_remove_page(): owner = d, count_info = 1
- // memory_exchange(): owner = NULL, count_info = 1
- // XENMEM_add_to_physmap: owner = d, count_info >= 1
- adjust_page_count_info(page);
- }
- }
- domain_page_flush_and_put(d, mpaddr, ptep, old_pte, page);
-}
-
-// The caller must get_page(mfn_to_page(mfn)) before calling.
-// The caller must call set_gpfn_from_mfn() beforehand if necessary.
-// Because the set_gpfn_from_mfn() result must be visible before the pte
-// xchg, the caller must use a memory barrier. NOTE: xchg has acquire semantics.
-// flags: ASSIGN_xxx
-static void
-assign_domain_page_replace(struct domain *d, unsigned long mpaddr,
- unsigned long mfn, unsigned long flags)
-{
- struct mm_struct *mm = &d->arch.mm;
- volatile pte_t* pte;
- pte_t old_pte;
- pte_t npte;
- unsigned long prot = flags_to_prot(flags);
-
- pte = lookup_alloc_domain_pte(d, mpaddr);
-
- // update pte
- npte = pfn_pte(mfn, __pgprot(prot));
- old_pte = ptep_xchg(mm, mpaddr, pte, npte);
- if (pte_mem(old_pte)) {
- unsigned long old_mfn = pte_pfn(old_pte);
-
- // The mfn == old_mfn case can happen when a domain maps a granted page
- // twice with the same pseudo-physical address.
- // It's nonsense, but allowed.
- // __gnttab_map_grant_ref()
- // => create_host_mapping()
- // => assign_domain_page_replace()
- if (mfn != old_mfn) {
- domain_put_page(d, mpaddr, pte, old_pte, 1);
- }
- }
- perfc_incr(assign_domain_page_replace);
-}
-
-// caller must get_page(new_page) before
-// Only steal_page() calls this function.
-static int
-assign_domain_page_cmpxchg_rel(struct domain* d, unsigned long mpaddr,
- struct page_info* old_page,
- struct page_info* new_page,
- unsigned long flags, int clear_PGC_allocate)
-{
- struct mm_struct *mm = &d->arch.mm;
- volatile pte_t* pte;
- unsigned long old_mfn;
- unsigned long old_prot;
- pte_t old_pte;
- unsigned long new_mfn;
- unsigned long new_prot;
- pte_t new_pte;
- pte_t ret_pte;
-
- BUG_ON((flags & ASSIGN_pgc_allocated) == 0);
- pte = lookup_alloc_domain_pte(d, mpaddr);
-
- again:
- old_prot = pte_val(*pte) & ~_PAGE_PPN_MASK;
- old_mfn = page_to_mfn(old_page);
- old_pte = pfn_pte(old_mfn, __pgprot(old_prot));
- if (!pte_present(old_pte)) {
- gdprintk(XENLOG_INFO,
- "%s: old_pte 0x%lx old_prot 0x%lx old_mfn 0x%lx\n",
- __func__, pte_val(old_pte), old_prot, old_mfn);
- return -EINVAL;
- }
-
- new_prot = flags_to_prot(flags);
- new_mfn = page_to_mfn(new_page);
- new_pte = pfn_pte(new_mfn, __pgprot(new_prot));
-
- // update pte
- ret_pte = ptep_cmpxchg_rel(mm, mpaddr, pte, old_pte, new_pte);
- if (unlikely(pte_val(old_pte) != pte_val(ret_pte))) {
- if (pte_pfn(old_pte) == pte_pfn(ret_pte)) {
- goto again;
- }
-
- gdprintk(XENLOG_INFO,
- "%s: old_pte 0x%lx old_prot 0x%lx old_mfn 0x%lx "
- "ret_pte 0x%lx ret_mfn 0x%lx\n",
- __func__,
- pte_val(old_pte), old_prot, old_mfn,
- pte_val(ret_pte), pte_pfn(ret_pte));
- return -EINVAL;
- }
-
- BUG_ON(!pte_mem(old_pte));
- BUG_ON(!pte_pgc_allocated(old_pte));
- BUG_ON(page_get_owner(old_page) != d);
- BUG_ON(get_gpfn_from_mfn(old_mfn) != (mpaddr >> PAGE_SHIFT));
- BUG_ON(old_mfn == new_mfn);
-
- set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
- if (likely(clear_PGC_allocate)) {
- if (!test_and_clear_bit(_PGC_allocated, &old_page->count_info))
- BUG();
- } else {
- int ret;
- // adjust for count_info for domain_page_flush_and_put()
- // This is slow path.
- BUG_ON(!test_bit(_PGC_allocated, &old_page->count_info));
- BUG_ON(d == NULL);
- ret = get_page(old_page, d);
- BUG_ON(ret == 0);
- }
-
- domain_page_flush_and_put(d, mpaddr, pte, old_pte, old_page);
- perfc_incr(assign_domain_pge_cmpxchg_rel);
- return 0;
-}
-
-static void
-zap_domain_page_one(struct domain *d, unsigned long mpaddr,
- int clear_PGC_allocate, unsigned long mfn)
-{
- struct mm_struct *mm = &d->arch.mm;
- volatile pte_t *pte;
- pte_t old_pte;
- struct page_info *page;
-
- pte = lookup_noalloc_domain_pte_none(d, mpaddr);
- if (pte == NULL)
- return;
- if (pte_none(*pte))
- return;
-
- if (mfn == INVALID_MFN) {
- // clear pte
- old_pte = ptep_get_and_clear(mm, mpaddr, pte);
- if(!pte_mem(old_pte))
- return;
- mfn = pte_pfn(old_pte);
- } else {
- unsigned long old_arflags;
- pte_t new_pte;
- pte_t ret_pte;
-
- again:
- // memory_exchange() calls guest_physmap_remove_page() with
- // a stolen page, i.e. page owner == NULL.
- BUG_ON(mfn_valid(mfn) &&
- page_get_owner(mfn_to_page(mfn)) != d &&
- page_get_owner(mfn_to_page(mfn)) != NULL);
- old_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
- old_pte = pfn_pte(mfn, __pgprot(old_arflags));
- new_pte = __pte(0);
-
- // update pte
- ret_pte = ptep_cmpxchg_rel(mm, mpaddr, pte, old_pte, new_pte);
- if (unlikely(pte_val(old_pte) != pte_val(ret_pte))) {
- if (pte_pfn(old_pte) == pte_pfn(ret_pte)) {
- goto again;
- }
-
- gdprintk(XENLOG_INFO, "%s: old_pte 0x%lx old_arflags 0x%lx mfn 0x%lx "
- "ret_pte 0x%lx ret_mfn 0x%lx\n",
- __func__,
- pte_val(old_pte), old_arflags, mfn,
- pte_val(ret_pte), pte_pfn(ret_pte));
- return;
- }
- BUG_ON(mfn != pte_pfn(ret_pte));
- }
-
- perfc_incr(zap_domain_page_one);
- if(!mfn_valid(mfn))
- return;
-
- if ( iommu_enabled && need_iommu(d) ){
- int i, j;
- j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
- for(i = 0 ; i < j; i++)
- iommu_unmap_page(d, (mpaddr>>PAGE_SHIFT)*j + i);
- }
-
- page = mfn_to_page(mfn);
- BUG_ON((page->count_info & PGC_count_mask) == 0);
-
- BUG_ON(clear_PGC_allocate && (page_get_owner(page) == NULL));
- domain_put_page(d, mpaddr, pte, old_pte, clear_PGC_allocate);
-}
-
-int
-deassign_domain_mmio_page(struct domain *d, unsigned long mpaddr,
- unsigned long phys_addr, unsigned long size )
-{
- unsigned long addr = mpaddr & PAGE_MASK;
- unsigned long end = PAGE_ALIGN(mpaddr + size);
-
- if (size == 0) {
- gdprintk(XENLOG_INFO, "%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
- __func__, d, mpaddr, size);
- }
- if (!efi_mmio(phys_addr, size)) {
-#ifndef NDEBUG
- gdprintk(XENLOG_INFO, "%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
- __func__, d, mpaddr, size);
-#endif
- return -EINVAL;
- }
-
- for (; addr < end; addr += PAGE_SIZE )
- zap_domain_page_one(d, addr, 0, INVALID_MFN);
- return 0;
-}
-
-unsigned long
-dom0vp_zap_physmap(struct domain *d, unsigned long gpfn,
- unsigned int extent_order)
-{
- if (extent_order != 0) {
- //XXX
- return -ENOSYS;
- }
-
- zap_domain_page_one(d, gpfn << PAGE_SHIFT, 1, INVALID_MFN);
- perfc_incr(dom0vp_zap_physmap);
- return 0;
-}
-
-static unsigned long
-__dom0vp_add_physmap(struct domain* d, unsigned long gpfn,
- unsigned long mfn_or_gmfn,
- unsigned long flags, domid_t domid, int is_gmfn)
-{
- int error = -EINVAL;
- struct domain* rd;
- unsigned long mfn;
-
- /* Not allowed by a domain. */
- if (flags & (ASSIGN_nocache | ASSIGN_pgc_allocated))
- return -EINVAL;
-
- rd = rcu_lock_domain_by_id(domid);
- if (unlikely(rd == NULL)) {
- switch (domid) {
- case DOMID_XEN:
- rd = dom_xen;
- break;
- case DOMID_IO:
- rd = dom_io;
- break;
- default:
- gdprintk(XENLOG_INFO, "d 0x%p domid %d "
- "gpfn 0x%lx mfn_or_gmfn 0x%lx flags 0x%lx domid %d\n",
- d, d->domain_id, gpfn, mfn_or_gmfn, flags, domid);
- return -ESRCH;
- }
- BUG_ON(rd == NULL);
- rcu_lock_domain(rd);
- }
-
- if (unlikely(rd == d))
- goto out1;
- /*
- * DOMID_XEN and DOMID_IO don't have their own p2m table.
- * It can be considered that their p2m conversion is p==m.
- */
- if (likely(is_gmfn && domid != DOMID_XEN && domid != DOMID_IO))
- mfn = gmfn_to_mfn(rd, mfn_or_gmfn);
- else
- mfn = mfn_or_gmfn;
- if (unlikely(!mfn_valid(mfn) || get_page(mfn_to_page(mfn), rd) == 0))
- goto out1;
-
- error = 0;
- BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
- get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
- assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
- //don't update p2m table because this page belongs to rd, not d.
- perfc_incr(dom0vp_add_physmap);
-out1:
- rcu_unlock_domain(rd);
- return error;
-}
-
-unsigned long
-dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn,
- unsigned long flags, domid_t domid)
-{
- return __dom0vp_add_physmap(d, gpfn, mfn, flags, domid, 0);
-}
-
-unsigned long
-dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn,
- unsigned long gmfn, unsigned long flags,
- domid_t domid)
-{
- return __dom0vp_add_physmap(d, gpfn, gmfn, flags, domid, 1);
-}
-
-#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
-#define P2M_PFN_ROUNDUP(x) (((x) + PTRS_PER_PTE - 1) & \
- ~(PTRS_PER_PTE - 1))
-#define P2M_PFN_ROUNDDOWN(x) ((x) & ~(PTRS_PER_PTE - 1))
-#define P2M_NUM_PFN(x) (((x) + PTRS_PER_PTE - 1) / PTRS_PER_PTE)
-#define MD_END(md) ((md)->phys_addr + \
- ((md)->num_pages << EFI_PAGE_SHIFT))
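-/*
- * Example (illustrative, assuming 16KB pages and 8-byte PTEs, so
- * PTRS_PER_PTE == 2048; the actual values are configuration-dependent).
- * For x == 5000:
- *   P2M_PFN_ROUNDUP(5000)   == 6144
- *   P2M_PFN_ROUNDDOWN(5000) == 4096
- *   P2M_NUM_PFN(5000)       == 3
- * i.e. one exposed pte page covers PTRS_PER_PTE guest pfns.
- */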
-static struct page_info* p2m_pte_zero_page = NULL;
-
-/* This must be called before dom0 p2m table allocation */
-void __init
-expose_p2m_init(void)
-{
- pte_t* pte;
-
- /*
- * Initialise our DOMID_P2M domain.
- * This domain owns m2p table pages.
- */
- dom_p2m = domain_create(DOMID_P2M, DOMCRF_dummy, 0);
- BUG_ON(dom_p2m == NULL);
- dom_p2m->max_pages = ~0U;
-
- pte = pte_alloc_one_kernel(NULL, 0);
- BUG_ON(pte == NULL);
- smp_mb();// make contents of the page visible.
- p2m_pte_zero_page = virt_to_page(pte);
-}
-
-// allocate pgd, pmd of dest_dom if necessary
-static int
-allocate_pgd_pmd(struct domain* dest_dom, unsigned long dest_gpfn,
- struct domain* src_dom,
- unsigned long src_gpfn, unsigned long num_src_gpfn)
-{
- unsigned long i = 0;
-
- BUG_ON((src_gpfn % PTRS_PER_PTE) != 0);
- BUG_ON((num_src_gpfn % PTRS_PER_PTE) != 0);
-
- while (i < num_src_gpfn) {
- volatile pte_t* src_pte;
- volatile pte_t* dest_pte;
-
- src_pte = lookup_noalloc_domain_pte(src_dom,
- (src_gpfn + i) << PAGE_SHIFT);
- if (src_pte == NULL) {
- i++;
- continue;
- }
-
- dest_pte = lookup_alloc_domain_pte(dest_dom,
- (dest_gpfn << PAGE_SHIFT) +
- i * sizeof(pte_t));
- if (dest_pte == NULL) {
- gdprintk(XENLOG_INFO, "%s failed to allocate pte page\n",
- __func__);
- return -ENOMEM;
- }
-
- // skip to next pte page
- i = P2M_PFN_ROUNDDOWN(i + PTRS_PER_PTE);
- }
- return 0;
-}
-
-static int
-expose_p2m_page(struct domain* d, unsigned long mpaddr, struct page_info* page)
-{
- int ret = get_page(page, dom_p2m);
- BUG_ON(ret != 1);
- return __assign_domain_page(d, mpaddr, page_to_maddr(page),
- ASSIGN_readonly);
-}
-
-// expose pte page
-static int
-expose_p2m_range(struct domain* dest_dom, unsigned long dest_gpfn,
- struct domain* src_dom,
- unsigned long src_gpfn, unsigned long num_src_gpfn)
-{
- unsigned long i = 0;
-
- BUG_ON((src_gpfn % PTRS_PER_PTE) != 0);
- BUG_ON((num_src_gpfn % PTRS_PER_PTE) != 0);
-
- while (i < num_src_gpfn) {
- volatile pte_t* pte;
-
- pte = lookup_noalloc_domain_pte(src_dom, (src_gpfn + i) << PAGE_SHIFT);
- if (pte == NULL) {
- i++;
- continue;
- }
-
- if (expose_p2m_page(dest_dom,
- (dest_gpfn << PAGE_SHIFT) + i * sizeof(pte_t),
- virt_to_page(pte)) < 0) {
- gdprintk(XENLOG_INFO, "%s failed to assign page\n", __func__);
- return -EAGAIN;
- }
-
- // skip to next pte page
- i = P2M_PFN_ROUNDDOWN(i + PTRS_PER_PTE);
- }
- return 0;
-}
-
-// expose p2m_pte_zero_page
-static int
-expose_zero_page(struct domain* dest_dom, unsigned long dest_gpfn,
- unsigned long num_src_gpfn)
-{
- unsigned long i;
-
- for (i = 0; i < P2M_NUM_PFN(num_src_gpfn); i++) {
- volatile pte_t* pte;
- pte = lookup_noalloc_domain_pte(dest_dom,
- (dest_gpfn + i) << PAGE_SHIFT);
- if (pte == NULL || pte_present(*pte))
- continue;
-
- if (expose_p2m_page(dest_dom, (dest_gpfn + i) << PAGE_SHIFT,
- p2m_pte_zero_page) < 0) {
- gdprintk(XENLOG_INFO, "%s failed to assign zero-pte page\n",
- __func__);
- return -EAGAIN;
- }
- }
- return 0;
-}
-
-static int
-expose_p2m(struct domain* dest_dom, unsigned long dest_gpfn,
- struct domain* src_dom,
- unsigned long src_gpfn, unsigned long num_src_gpfn)
-{
- if (allocate_pgd_pmd(dest_dom, dest_gpfn,
- src_dom, src_gpfn, num_src_gpfn))
- return -ENOMEM;
-
- if (expose_p2m_range(dest_dom, dest_gpfn,
- src_dom, src_gpfn, num_src_gpfn))
- return -EAGAIN;
-
- if (expose_zero_page(dest_dom, dest_gpfn, num_src_gpfn))
- return -EAGAIN;
-
- return 0;
-}
-
-static void
-unexpose_p2m(struct domain* dest_dom,
- unsigned long dest_gpfn, unsigned long num_dest_gpfn)
-{
- unsigned long i;
-
- for (i = 0; i < num_dest_gpfn; i++) {
- zap_domain_page_one(dest_dom, (dest_gpfn + i) << PAGE_SHIFT,
- 0, INVALID_MFN);
- }
-}
-
-// It is possible to optimize this loop, but it isn't performance critical.
-unsigned long
-dom0vp_expose_p2m(struct domain* d,
- unsigned long conv_start_gpfn,
- unsigned long assign_start_gpfn,
- unsigned long expose_size, unsigned long granule_pfn)
-{
- unsigned long ret;
- unsigned long expose_num_pfn = expose_size >> PAGE_SHIFT;
-
- if ((expose_size % PAGE_SIZE) != 0 ||
- (granule_pfn % PTRS_PER_PTE) != 0 ||
- (expose_num_pfn % PTRS_PER_PTE) != 0 ||
- (conv_start_gpfn % granule_pfn) != 0 ||
- (assign_start_gpfn % granule_pfn) != 0 ||
- (expose_num_pfn % granule_pfn) != 0) {
- gdprintk(XENLOG_INFO,
- "%s conv_start_gpfn 0x%016lx assign_start_gpfn 0x%016lx "
- "expose_size 0x%016lx granulte_pfn 0x%016lx\n", __func__,
- conv_start_gpfn, assign_start_gpfn, expose_size, granule_pfn);
- return -EINVAL;
- }
-
- if (granule_pfn != PTRS_PER_PTE) {
- gdprintk(XENLOG_INFO,
- "%s granule_pfn 0x%016lx PTRS_PER_PTE 0x%016lx\n",
- __func__, granule_pfn, PTRS_PER_PTE);
- return -ENOSYS;
- }
- ret = expose_p2m(d, assign_start_gpfn,
- d, conv_start_gpfn, expose_num_pfn);
- return ret;
-}
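-/*
- * Usage sketch (illustrative; the argument values are assumptions): a
- * PV guest asking for a read-only view of its own p2m table starting
- * at pseudo-physical pfn ASSIGN_START would issue roughly
- *   dom0vp_expose_p2m(d, 0, ASSIGN_START,
- *                     d->arch.convmem_end, PTRS_PER_PTE);
- * after which present pte pages are mapped ASSIGN_readonly and holes
- * are backed by p2m_pte_zero_page.
- */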
-
-static int
-memmap_info_copy_from_guest(struct xen_ia64_memmap_info* memmap_info,
- char** memmap_p,
- XEN_GUEST_HANDLE(char) buffer)
-{
- char *memmap;
- char *p;
- char *memmap_end;
- efi_memory_desc_t *md;
- unsigned long start;
- unsigned long end;
- efi_memory_desc_t *prev_md;
-
- if (copy_from_guest((char*)memmap_info, buffer, sizeof(*memmap_info)))
- return -EFAULT;
- if (memmap_info->efi_memdesc_size < sizeof(efi_memory_desc_t) ||
- memmap_info->efi_memmap_size < memmap_info->efi_memdesc_size ||
- (memmap_info->efi_memmap_size % memmap_info->efi_memdesc_size) != 0)
- return -EINVAL;
-
- memmap = _xmalloc(memmap_info->efi_memmap_size,
- __alignof__(efi_memory_desc_t));
- if (memmap == NULL)
- return -ENOMEM;
- if (copy_from_guest_offset(memmap, buffer, sizeof(*memmap_info),
- memmap_info->efi_memmap_size)) {
- xfree(memmap);
- return -EFAULT;
- }
-
- /* integrity check & simplification */
- sort(memmap, memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size,
- memmap_info->efi_memdesc_size, efi_mdt_cmp, NULL);
-
- /* alignment & overlap check */
- prev_md = NULL;
- p = memmap;
- memmap_end = memmap + memmap_info->efi_memmap_size;
- for (p = memmap; p < memmap_end; p += memmap_info->efi_memdesc_size) {
- md = (efi_memory_desc_t*)p;
- start = md->phys_addr;
-
- if (start & ((1UL << EFI_PAGE_SHIFT) - 1) || md->num_pages == 0) {
- xfree(memmap);
- return -EINVAL;
- }
-
- if (prev_md != NULL) {
- unsigned long prev_end = MD_END(prev_md);
- if (prev_end > start) {
- xfree(memmap);
- return -EINVAL;
- }
- }
-
- prev_md = (efi_memory_desc_t *)p;
- }
-
- /* coalesce */
- prev_md = NULL;
- p = memmap;
- while (p < memmap_end) {
- md = (efi_memory_desc_t*)p;
- start = md->phys_addr;
- end = MD_END(md);
-
- start = P2M_PFN_ROUNDDOWN(start >> PAGE_SHIFT) << PAGE_SHIFT;
- end = P2M_PFN_ROUNDUP(end >> PAGE_SHIFT) << PAGE_SHIFT;
- md->phys_addr = start;
- md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
-
- if (prev_md != NULL) {
- unsigned long prev_end = MD_END(prev_md);
- if (prev_end >= start) {
- size_t left;
- end = max(prev_end, end);
- prev_md->num_pages = (end - prev_md->phys_addr) >> EFI_PAGE_SHIFT;
-
- left = memmap_end - p;
- if (left > memmap_info->efi_memdesc_size) {
- left -= memmap_info->efi_memdesc_size;
- memmove(p, p + memmap_info->efi_memdesc_size, left);
- }
-
- memmap_info->efi_memmap_size -= memmap_info->efi_memdesc_size;
- memmap_end -= memmap_info->efi_memdesc_size;
- continue;
- }
- }
-
- prev_md = md;
- p += memmap_info->efi_memdesc_size;
- }
-
- if (copy_to_guest(buffer, (char*)memmap_info, sizeof(*memmap_info)) ||
- copy_to_guest_offset(buffer, sizeof(*memmap_info),
- (char*)memmap, memmap_info->efi_memmap_size)) {
- xfree(memmap);
- return -EFAULT;
- }
-
- *memmap_p = memmap;
- return 0;
-}
-
-static int
-foreign_p2m_allocate_pte(struct domain* d,
- const struct xen_ia64_memmap_info* memmap_info,
- const void* memmap)
-{
- const void* memmap_end = memmap + memmap_info->efi_memmap_size;
- const void* p;
-
- for (p = memmap; p < memmap_end; p += memmap_info->efi_memdesc_size) {
- const efi_memory_desc_t* md = p;
- unsigned long start = md->phys_addr;
- unsigned long end = MD_END(md);
- unsigned long gpaddr;
-
- for (gpaddr = start; gpaddr < end; gpaddr += PAGE_SIZE) {
- if (lookup_alloc_domain_pte(d, gpaddr) == NULL) {
- return -ENOMEM;
- }
- }
- }
-
- return 0;
-}
-
-struct foreign_p2m_region {
- unsigned long gpfn;
- unsigned long num_gpfn;
-};
-
-struct foreign_p2m_entry {
- struct list_head list;
- int busy;
-
- /* src domain */
- struct domain* src_dom;
-
- /* region into which foreign p2m table is mapped */
- unsigned long gpfn;
- unsigned long num_gpfn;
- unsigned int num_region;
- struct foreign_p2m_region region[0];
-};
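-/*
- * Note (added for clarity): region[0] is the pre-C99 flexible array
- * member idiom; an entry with num_region regions is allocated in one
- * shot, as foreign_p2m_alloc() below does:
- *   entry = _xmalloc(sizeof(*entry) +
- *                    num_region * sizeof(entry->region[0]),
- *                    __alignof__(*entry));
- */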
-
-/* caller must increment the reference count of src_dom */
-static int
-foreign_p2m_alloc(struct foreign_p2m* foreign_p2m,
- unsigned long dest_gpfn, struct domain* src_dom,
- struct xen_ia64_memmap_info* memmap_info, void* memmap,
- struct foreign_p2m_entry** entryp)
-{
- void* memmap_end = memmap + memmap_info->efi_memmap_size;
- efi_memory_desc_t* md;
- unsigned long dest_gpfn_end;
- unsigned long src_gpfn;
- unsigned long src_gpfn_end;
-
- unsigned int num_region;
- struct foreign_p2m_entry* entry;
- struct foreign_p2m_entry* prev;
- struct foreign_p2m_entry* pos;
-
- num_region = (memmap_end - memmap) / memmap_info->efi_memdesc_size;
-
- md = memmap;
- src_gpfn = P2M_PFN_ROUNDDOWN(md->phys_addr >> PAGE_SHIFT);
-
- md = memmap + (num_region - 1) * memmap_info->efi_memdesc_size;
- src_gpfn_end = MD_END(md) >> PAGE_SHIFT;
- if (src_gpfn_end >
- P2M_PFN_ROUNDUP(src_dom->arch.convmem_end >> PAGE_SHIFT))
- return -EINVAL;
-
- src_gpfn_end = P2M_PFN_ROUNDUP(src_gpfn_end);
- dest_gpfn_end = dest_gpfn + P2M_NUM_PFN(src_gpfn_end - src_gpfn);
- entry = _xmalloc(sizeof(*entry) + num_region * sizeof(entry->region[0]),
- __alignof__(*entry));
- if (entry == NULL)
- return -ENOMEM;
-
- entry->busy = 1;
- entry->gpfn = dest_gpfn;
- entry->num_gpfn = dest_gpfn_end - dest_gpfn;
- entry->src_dom = src_dom;
- entry->num_region = 0;
- memset(entry->region, 0, sizeof(entry->region[0]) * num_region);
- prev = NULL;
-
- spin_lock(&foreign_p2m->lock);
- if (list_empty(&foreign_p2m->head))
- prev = (struct foreign_p2m_entry*)&foreign_p2m->head;
-
- list_for_each_entry(pos, &foreign_p2m->head, list) {
- if (pos->gpfn + pos->num_gpfn < dest_gpfn) {
- prev = pos;
- continue;
- }
-
- if (dest_gpfn_end < pos->gpfn) {
- if (prev != NULL && prev->gpfn + prev->num_gpfn > dest_gpfn)
- prev = NULL;/* overlap */
- break;
- }
-
- /* overlap */
- prev = NULL;
- break;
- }
- if (prev != NULL) {
- list_add(&entry->list, &prev->list);
- spin_unlock(&foreign_p2m->lock);
- *entryp = entry;
- return 0;
- }
- spin_unlock(&foreign_p2m->lock);
- xfree(entry);
- return -EBUSY;
-}
-
-static void
-foreign_p2m_unexpose(struct domain* dest_dom, struct foreign_p2m_entry* entry)
-{
- unsigned int i;
-
- BUG_ON(!entry->busy);
- for (i = 0; i < entry->num_region; i++)
- unexpose_p2m(dest_dom,
- entry->region[i].gpfn, entry->region[i].num_gpfn);
-}
-
-static void
-foreign_p2m_unbusy(struct foreign_p2m* foreign_p2m,
- struct foreign_p2m_entry* entry)
-{
- spin_lock(&foreign_p2m->lock);
- BUG_ON(!entry->busy);
- entry->busy = 0;
- spin_unlock(&foreign_p2m->lock);
-}
-
-static void
-foreign_p2m_free(struct foreign_p2m* foreign_p2m,
- struct foreign_p2m_entry* entry)
-{
- spin_lock(&foreign_p2m->lock);
- BUG_ON(!entry->busy);
- list_del(&entry->list);
- spin_unlock(&foreign_p2m->lock);
-
- put_domain(entry->src_dom);
- xfree(entry);
-}
-
-void
-foreign_p2m_init(struct domain* d)
-{
- struct foreign_p2m* foreign_p2m = &d->arch.foreign_p2m;
- INIT_LIST_HEAD(&foreign_p2m->head);
- spin_lock_init(&foreign_p2m->lock);
-}
-
-void
-foreign_p2m_destroy(struct domain* d)
-{
- struct foreign_p2m* foreign_p2m = &d->arch.foreign_p2m;
- struct foreign_p2m_entry* entry;
- struct foreign_p2m_entry* n;
-
- spin_lock(&foreign_p2m->lock);
- list_for_each_entry_safe(entry, n, &foreign_p2m->head, list) {
- /* mm_teardown() cleared p2m table already */
- /* foreign_p2m_unexpose(d, entry);*/
- list_del(&entry->list);
- put_domain(entry->src_dom);
- xfree(entry);
- }
- spin_unlock(&foreign_p2m->lock);
-}
-
-unsigned long
-dom0vp_expose_foreign_p2m(struct domain* dest_dom,
- unsigned long dest_gpfn,
- domid_t domid,
- XEN_GUEST_HANDLE(char) buffer,
- unsigned long flags)
-{
- unsigned long ret = 0;
- struct domain* src_dom;
- struct xen_ia64_memmap_info memmap_info;
- char* memmap;
- void* memmap_end;
- void* p;
-
- struct foreign_p2m_entry* entry;
-
- ret = memmap_info_copy_from_guest(&memmap_info, &memmap, buffer);
- if (ret != 0)
- return ret;
-
- dest_dom = rcu_lock_domain(dest_dom);
- if (dest_dom == NULL) {
- ret = -EINVAL;
- goto out;
- }
-#if 1
- // Exposing a domain's own p2m as a foreign p2m isn't allowed.
- // Otherwise the domain couldn't be destroyed because
- // no one would decrement the domain reference count.
- if (domid == dest_dom->domain_id) {
- ret = -EINVAL;
- goto out;
- }
-#endif
-
- src_dom = get_domain_by_id(domid);
- if (src_dom == NULL) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (flags & IA64_DOM0VP_EFP_ALLOC_PTE) {
- ret = foreign_p2m_allocate_pte(src_dom, &memmap_info, memmap);
- if (ret != 0)
- goto out_unlock;
- }
-
- ret = foreign_p2m_alloc(&dest_dom->arch.foreign_p2m, dest_gpfn,
- src_dom, &memmap_info, memmap, &entry);
- if (ret != 0)
- goto out_unlock;
-
- memmap_end = memmap + memmap_info.efi_memmap_size;
- for (p = memmap; p < memmap_end; p += memmap_info.efi_memdesc_size) {
- efi_memory_desc_t* md = p;
- unsigned long src_gpfn =
- P2M_PFN_ROUNDDOWN(md->phys_addr >> PAGE_SHIFT);
- unsigned long src_gpfn_end =
- P2M_PFN_ROUNDUP(MD_END(md) >> PAGE_SHIFT);
- unsigned long num_src_gpfn = src_gpfn_end - src_gpfn;
-
- ret = expose_p2m(dest_dom, dest_gpfn + src_gpfn / PTRS_PER_PTE,
- src_dom, src_gpfn, num_src_gpfn);
- if (ret != 0)
- break;
-
- entry->region[entry->num_region].gpfn =
- dest_gpfn + src_gpfn / PTRS_PER_PTE;
- entry->region[entry->num_region].num_gpfn = P2M_NUM_PFN(num_src_gpfn);
- entry->num_region++;
- }
-
- if (ret == 0) {
- foreign_p2m_unbusy(&dest_dom->arch.foreign_p2m, entry);
- } else {
- foreign_p2m_unexpose(dest_dom, entry);
- foreign_p2m_free(&dest_dom->arch.foreign_p2m, entry);
- }
-
- out_unlock:
- rcu_unlock_domain(dest_dom);
- out:
- xfree(memmap);
- return ret;
-}
-
-unsigned long
-dom0vp_unexpose_foreign_p2m(struct domain* dest_dom,
- unsigned long dest_gpfn,
- domid_t domid)
-{
- int ret = -ENOENT;
- struct foreign_p2m* foreign_p2m = &dest_dom->arch.foreign_p2m;
- struct foreign_p2m_entry* entry;
-
- dest_dom = rcu_lock_domain(dest_dom);
- if (dest_dom == NULL)
- return ret;
- spin_lock(&foreign_p2m->lock);
- list_for_each_entry(entry, &foreign_p2m->head, list) {
- if (entry->gpfn < dest_gpfn)
- continue;
- if (dest_gpfn < entry->gpfn)
- break;
-
- if (domid == entry->src_dom->domain_id)
- ret = 0;
- else
- ret = -EINVAL;
- break;
- }
- if (ret == 0) {
- if (entry->busy == 0)
- entry->busy = 1;
- else
- ret = -EBUSY;
- }
- spin_unlock(&foreign_p2m->lock);
-
- if (ret == 0) {
- foreign_p2m_unexpose(dest_dom, entry);
- foreign_p2m_free(&dest_dom->arch.foreign_p2m, entry);
- }
- rcu_unlock_domain(dest_dom);
- return ret;
-}
-
-/* This lock is only for memmap_info. domain_lock() is abused here. */
-static void
-memmap_lock(struct domain *d)
-{
- domain_lock(d);
-}
-
-static void
-memmap_unlock(struct domain *d)
-{
- domain_unlock(d);
-}
-
-/* copy memory range to domain pseudo physical address space */
-static int
-__memmap_copy_to(struct domain *d, unsigned long dest_gpfn,
- void *src, unsigned long num_pages)
-{
- BUG_ON(((unsigned long)src & ~PAGE_MASK) != 0);
-
- while (num_pages > 0) {
- unsigned long mfn;
- struct page_info *page;
- void *virt;
-
- mfn = gmfn_to_mfn_foreign(d, dest_gpfn);
- if (mfn == 0 || mfn == INVALID_MFN)
- return -EFAULT;
- page = mfn_to_page(mfn);
- if (get_page(page, d) == 0)
- return -EFAULT;
- virt = mfn_to_virt(mfn);
- copy_page(virt, src);
- __xencomm_mark_dirty(d, (unsigned long)virt, PAGE_SIZE);
- put_page(page);
-
- src += PAGE_SIZE;
- dest_gpfn++;
- num_pages--;
- }
-
- return 0;
-}
-
-/* copy memory range from domain pseudo physical address space */
-static int
-__memmap_copy_from(void *dest, struct domain *d, unsigned long src_gpfn,
- unsigned long num_pages)
-{
- BUG_ON(((unsigned long)dest & ~PAGE_MASK) != 0);
-
- while (num_pages > 0) {
- unsigned long mfn;
- struct page_info *page;
-
- mfn = gmfn_to_mfn_foreign(d, src_gpfn);
- if (mfn == 0 || mfn == INVALID_MFN)
- return -EFAULT;
- page = mfn_to_page(mfn);
- if (get_page(page, d) == 0)
- return -EFAULT;
- copy_page(dest, mfn_to_virt(mfn));
- put_page(page);
-
- dest += PAGE_SIZE;
- src_gpfn++;
- num_pages--;
- }
-
- return 0;
-}
-
-/* This function unlocks and re-locks memmap_lock.
- * The caller must free (*page, *order) even in the error case,
- * by checking for *page != NULL.
- */
-static int
-memmap_copy_from(struct domain *d,
- struct page_info **page, unsigned long *order)
-{
- unsigned long num_pages;
- struct xen_ia64_memmap_info *memmap_info;
- unsigned long memmap_info_pfn;
-
- num_pages = d->shared_info->arch.memmap_info_num_pages;
- memmap_unlock(d);
-
- again:
- *order = get_order(num_pages << PAGE_SHIFT);
- *page = alloc_domheap_pages(NULL, *order, 0);
- if (*page == NULL)
- return -ENOMEM;
- memmap_info = page_to_virt(*page);
-
- memmap_lock(d);
- if (d->shared_info->arch.memmap_info_num_pages != num_pages) {
- num_pages = d->shared_info->arch.memmap_info_num_pages;
- memmap_unlock(d);
- free_domheap_pages(*page, *order);
- goto again;
- }
- memmap_info_pfn = d->shared_info->arch.memmap_info_pfn;
-
- /* copy into a local buffer to make the pages virtually contiguous */
- return __memmap_copy_from(memmap_info, d, memmap_info_pfn, num_pages);
-}
-
-static int
-memdesc_can_expand(const struct xen_ia64_memmap_info *memmap_info,
- unsigned long num_pages)
-{
- /* Is there room for one more md? */
- if ((num_pages << PAGE_SHIFT) <
- (sizeof(*memmap_info) + memmap_info->efi_memmap_size +
- memmap_info->efi_memdesc_size))
- return 0;
-
- return 1;
-}
-
-static int
-memdesc_can_collapse(const efi_memory_desc_t *lhs,
- const efi_memory_desc_t *rhs)
-{
- return (lhs->type == rhs->type && lhs->attribute == rhs->attribute);
-}
-
-static int
-__dom0vp_add_memdesc_one(struct xen_ia64_memmap_info *memmap_info,
- unsigned long num_pages,
- const efi_memory_desc_t *md)
-{
- void* const memmap_end = (void*)memmap_info->memdesc +
- memmap_info->efi_memmap_size;
- void *p;
- efi_memory_desc_t *tmp_md;
- efi_memory_desc_t *s_md;
- efi_memory_desc_t *e_md;
- u64 phys_addr;
- u64 phys_addr_end;
-
- /* fast path. appending to the last entry */
- tmp_md = (efi_memory_desc_t*)(memmap_end - memmap_info->efi_memdesc_size);
- if (MD_END(tmp_md) < md->phys_addr) {
- /* append one */
- if (!memdesc_can_expand(memmap_info, num_pages))
- return -ENOMEM;
-
- memcpy(memmap_end, md, memmap_info->efi_memdesc_size);
- memmap_info->efi_memmap_size += memmap_info->efi_memdesc_size;
- return 0;
- }
- /* fast path. expand the last entry */
- if (tmp_md->phys_addr <= md->phys_addr) {
- if (!memdesc_can_collapse(tmp_md, md))
- return -EINVAL;
-
- phys_addr_end = max(MD_END(tmp_md), MD_END(md));
- tmp_md->num_pages =
- (phys_addr_end - tmp_md->phys_addr) >> EFI_PAGE_SHIFT;
- return 0;
- }
-
- /* slow path */
- s_md = NULL;
- e_md = NULL;
- for (p = memmap_info->memdesc;
- p < memmap_end;
- p += memmap_info->efi_memdesc_size) {
- tmp_md = p;
-
- if (MD_END(tmp_md) < md->phys_addr)
- continue;
-
- if (MD_END(md) < tmp_md->phys_addr) {
- if (s_md == NULL) {
- void *next_md = p + memmap_info->efi_memdesc_size;
- size_t left_size = memmap_end - (void*)tmp_md;
-
- /* found a hole; just insert md here */
- if (!memdesc_can_expand(memmap_info, num_pages))
- return -ENOMEM;
-
- memmove(next_md, tmp_md, left_size);
- memcpy(tmp_md, md, memmap_info->efi_memdesc_size);
- memmap_info->efi_memmap_size += memmap_info->efi_memdesc_size;
- return 0;
- }
- break;
- }
-
- if (s_md == NULL)
- s_md = tmp_md;
- e_md = tmp_md;
-
- if (!memdesc_can_collapse(tmp_md, md))
- return -EINVAL;
- }
- BUG_ON(s_md == NULL || e_md == NULL);
-
- /* collapse into one */
- phys_addr = min(md->phys_addr, s_md->phys_addr);
- phys_addr_end = max(MD_END(md), MD_END(e_md));
- s_md->phys_addr = phys_addr;
- s_md->num_pages = (phys_addr_end - phys_addr) >> EFI_PAGE_SHIFT;
- if (s_md != e_md) {
- void *next_s_md = (void*)s_md + memmap_info->efi_memdesc_size;
- void *next_e_md = (void*)e_md + memmap_info->efi_memdesc_size;
- size_t left_size = memmap_end - (void*)next_e_md;
-
- memmap_info->efi_memmap_size -= (void*)e_md - (void*)s_md;
- if (left_size > 0)
- memmove(next_s_md, next_e_md, left_size);
- }
-
- return 0;
-}
-
-/*
- * d->arch.convmem_end is mostly read only and sometimes increased.
- * It is protected by memmap_lock
- *
- * d->arch.convmem_end is also referenced by the guest (self p2m exposure).
- * d->shared_info.arch.memmap_info_xxx and memmap_info are
- * referenced by the tools stack (save/dump-core/foreign p2m exposure).
- *
- * reader side:
- * - get d->arch.convmem_end (via XENMEM_maximum_gpfn)
- * - issue get_memmap hypercall to get memmap
- * In VMM
- * - lock memmap_lock
- * - copy memmap from target guest
- * - unlock memmap_lock
- * - copy memmap into tools stack address space.
- * - check d->shared_info.memmap_info_num_pages. try again if necessary
- * - get d->arch.convmem_end. try again if changed.
- *
- * writer side:
- * - lock memmap_lock
- * - increase d->arch.convmem_end at first if necessary
- * - unlock memmap_lock
- * - allocate memory
- * In fact page allocation isn't blocking, so unlock/lock isn't necessary.
- * - lock memmap_lock
- * - update memmap_info
- * - unlock memmap_lock
- */
-static int
-__dom0vp_add_memdesc(struct domain *targ_d,
- const struct xen_ia64_memmap_info *u_memmap_info,
- const char *u_memmap)
-{
- int ret = 0;
- const void* const u_memmap_end = u_memmap + u_memmap_info->efi_memmap_size;
- const efi_memory_desc_t *md;
-
- unsigned long md_end_max;
- unsigned long num_pages;
- unsigned long order;
- unsigned long memmap_info_pfn;
-
- struct page_info *page = NULL;
- struct xen_ia64_memmap_info *memmap_info;
- size_t unused_size;
-
- const void *p;
-
- /* update d->arch.convmem_end */
- md_end_max = 0;
- for (p = u_memmap; p < u_memmap_end;
- p += u_memmap_info->efi_memdesc_size) {
- md = p;
- if (MD_END(md) > md_end_max)
- md_end_max = MD_END(md);
- }
- memmap_lock(targ_d);
- /* convmem_end is also protected by the memmap lock */
- if (md_end_max > targ_d->arch.convmem_end)
- targ_d->arch.convmem_end = md_end_max;
-
- /* memmap_copy_from() unlocks and re-locks memmap_lock */
- ret = memmap_copy_from(targ_d, &page, &order);
- if (ret != 0)
- goto out;
- memmap_info = page_to_virt(page);
- num_pages = targ_d->shared_info->arch.memmap_info_num_pages;
- memmap_info_pfn = targ_d->shared_info->arch.memmap_info_pfn;
-
- if (memmap_info->efi_memdesc_size != u_memmap_info->efi_memdesc_size ||
- memmap_info->efi_memdesc_version !=
- u_memmap_info->efi_memdesc_version) {
- ret = -EINVAL;
- goto out;
- }
-
- /* update memdesc */
- for (p = u_memmap;
- p < u_memmap_end;
- p += u_memmap_info->efi_memdesc_size) {
- md = p;
- ret = __dom0vp_add_memdesc_one(memmap_info, num_pages, md);
- if (ret != 0)
- goto out;
- }
-
- /* zero out the unused region to avoid leaking hypervisor data */
- unused_size = (num_pages << PAGE_SHIFT) -
- (sizeof(*memmap_info) + memmap_info->efi_memmap_size);
- if (unused_size > 0)
- memset((void*)memmap_info->memdesc + memmap_info->efi_memmap_size,
- 0, unused_size);
-
- /* copy back into domain. */
- ret = __memmap_copy_to(targ_d, memmap_info_pfn, memmap_info, num_pages);
-
- out:
- memmap_unlock(targ_d);
-
- if (page != NULL)
- free_domheap_pages(page, order);
- return ret;
-}
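-/*
- * Reader-side sketch of the protocol described above (illustrative;
- * seen from the tools stack, with hypothetical helper names):
- *
- *   do {
- *       max_gpfn = get_maximum_gpfn(domid);   // d->arch.convmem_end
- *       fetch_memmap(domid, buf);             // hypercall, memmap_lock held
- *   } while (memmap_num_pages_changed(buf) ||
- *            get_maximum_gpfn(domid) != max_gpfn);
- */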
-
-unsigned long
-dom0vp_get_memmap(domid_t domid, XEN_GUEST_HANDLE(char) buffer)
-{
- unsigned long ret = 0;
- struct domain *targ_d;
-
- struct page_info *page = NULL;
- unsigned long order;
-
- struct xen_ia64_memmap_info *memmap_info;
- unsigned long num_pages;
-
- ret = rcu_lock_target_domain_by_id(domid, &targ_d);
- if (ret != 0)
- return ret;
-
- memmap_lock(targ_d);
-
- ret = memmap_copy_from(targ_d, &page, &order);
- if (ret != 0)
- goto unlock_out;
-
- memmap_info = page_to_virt(page);
- num_pages = targ_d->shared_info->arch.memmap_info_num_pages;
- if ((num_pages << PAGE_SHIFT) - sizeof(*memmap_info) <
- memmap_info->efi_memmap_size) {
- ret = -EFAULT;
- goto unlock_out;
- }
- memmap_unlock(targ_d);
- rcu_unlock_domain(targ_d);
-
- if (copy_to_guest(buffer, (char*)memmap_info, sizeof(*memmap_info)) ||
- copy_to_guest_offset(buffer, sizeof(*memmap_info),
- (char*)memmap_info->memdesc,
- memmap_info->efi_memmap_size))
- ret = -EFAULT;
-
- out:
- if (page != NULL)
- free_domheap_pages(page, order);
- return ret;
-
- unlock_out:
- memmap_unlock(targ_d);
- rcu_unlock_domain(targ_d);
- goto out;
-}
-#endif
-
-// grant table host mapping
-// mpaddr: host_addr: pseudo physical address
-// mfn: frame: machine page frame
-// flags: GNTMAP_readonly | GNTMAP_application_map | GNTMAP_contains_pte
-int
-create_grant_host_mapping(unsigned long gpaddr, unsigned long mfn,
- unsigned int flags, unsigned int cache_flags)
-{
- struct domain* d = current->domain;
- struct page_info* page;
- int ret;
-
- if ((flags & (GNTMAP_device_map |
- GNTMAP_application_map | GNTMAP_contains_pte)) ||
- (cache_flags)) {
- gdprintk(XENLOG_INFO, "%s: flags 0x%x cache_flags 0x%x\n",
- __func__, flags, cache_flags);
- return GNTST_general_error;
- }
-
- BUG_ON(!mfn_valid(mfn));
- page = mfn_to_page(mfn);
- ret = get_page(page, page_get_owner(page));
- BUG_ON(ret == 0);
- assign_domain_page_replace(d, gpaddr, mfn,
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
- ASSIGN_tlb_track |
-#endif
- ((flags & GNTMAP_readonly) ?
- ASSIGN_readonly : ASSIGN_writable));
- perfc_incr(create_grant_host_mapping);
- return GNTST_okay;
-}
-
-// grant table host unmapping
-int
-replace_grant_host_mapping(unsigned long gpaddr,
- unsigned long mfn, unsigned long new_gpaddr, unsigned int flags)
-{
- struct domain* d = current->domain;
- unsigned long gpfn = gpaddr >> PAGE_SHIFT;
- volatile pte_t* pte;
- unsigned long cur_arflags;
- pte_t cur_pte;
- pte_t new_pte = __pte(0);
- pte_t old_pte;
- struct page_info* page = mfn_to_page(mfn);
- struct page_info* new_page = NULL;
- volatile pte_t* new_page_pte = NULL;
- unsigned long new_page_mfn = INVALID_MFN;
-
- if (new_gpaddr) {
- new_page_pte = lookup_noalloc_domain_pte_none(d, new_gpaddr);
- if (likely(new_page_pte != NULL)) {
- new_pte = ptep_get_and_clear(&d->arch.mm,
- new_gpaddr, new_page_pte);
- if (likely(pte_present(new_pte))) {
- struct domain* page_owner;
-
- new_page_mfn = pte_pfn(new_pte);
- new_page = mfn_to_page(new_page_mfn);
- page_owner = page_get_owner(new_page);
- if (unlikely(page_owner == NULL)) {
- gdprintk(XENLOG_INFO,
- "%s: page_owner == NULL "
- "gpaddr 0x%lx mfn 0x%lx "
- "new_gpaddr 0x%lx mfn 0x%lx\n",
- __func__, gpaddr, mfn, new_gpaddr, new_page_mfn);
- new_page = NULL; /* prevent domain_put_page() */
- return GNTST_general_error;
- }
-
- /*
-  * domain_put_page(clear_PGC_allocated = 0)
-  * doesn't decrement the refcount of a page with
-  * pte_pgc_allocated() == 1. Be careful.
-  */
- if (unlikely(!pte_pgc_allocated(new_pte))) {
- /* domain_put_page() decrements page refcount. adjust it. */
- if (get_page(new_page, page_owner)) {
- gdprintk(XENLOG_INFO,
- "%s: get_page() failed. "
- "gpaddr 0x%lx mfn 0x%lx "
- "new_gpaddr 0x%lx mfn 0x%lx\n",
- __func__, gpaddr, mfn,
- new_gpaddr, new_page_mfn);
- return GNTST_general_error;
- }
- }
- domain_put_page(d, new_gpaddr, new_page_pte, new_pte, 0);
- } else
- new_pte = __pte(0);
- }
- }
-
- if (flags & (GNTMAP_application_map | GNTMAP_contains_pte)) {
- gdprintk(XENLOG_INFO, "%s: flags 0x%x\n", __func__, flags);
- return GNTST_general_error;
- }
-
- pte = lookup_noalloc_domain_pte(d, gpaddr);
- if (pte == NULL) {
- gdprintk(XENLOG_INFO, "%s: gpaddr 0x%lx mfn 0x%lx\n",
- __func__, gpaddr, mfn);
- return GNTST_general_error;
- }
-
- again:
- cur_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
- cur_pte = pfn_pte(mfn, __pgprot(cur_arflags));
- if (!pte_present(cur_pte) ||
- (page_get_owner(page) == d && get_gpfn_from_mfn(mfn) == gpfn)) {
- gdprintk(XENLOG_INFO, "%s: gpaddr 0x%lx mfn 0x%lx cur_pte 0x%lx\n",
- __func__, gpaddr, mfn, pte_val(cur_pte));
- return GNTST_general_error;
- }
-
- if (new_page) {
- BUG_ON(new_page_mfn == INVALID_MFN);
- set_gpfn_from_mfn(new_page_mfn, gpfn);
- /* smp_mb() isn't needed because ptep_cmpxchg_rel()
-    has release semantics. */
- }
- old_pte = ptep_cmpxchg_rel(&d->arch.mm, gpaddr, pte, cur_pte, new_pte);
- if (unlikely(pte_val(cur_pte) != pte_val(old_pte))) {
- if (pte_pfn(old_pte) == mfn) {
- goto again;
- }
- if (new_page) {
- BUG_ON(new_page_mfn == INVALID_MFN);
- set_gpfn_from_mfn(new_page_mfn, INVALID_M2P_ENTRY);
- domain_put_page(d, new_gpaddr, new_page_pte, new_pte, 1);
- }
- goto out;
- }
- if (unlikely(!pte_present(old_pte)))
- goto out;
- BUG_ON(pte_pfn(old_pte) != mfn);
-
- /* try_to_clear_PGC_allocate(d, page) is not needed. */
- BUG_ON(page_get_owner(page) == d &&
- get_gpfn_from_mfn(mfn) == gpfn);
- BUG_ON(pte_pgc_allocated(old_pte));
- domain_page_flush_and_put(d, gpaddr, pte, old_pte, page);
-
- perfc_incr(replace_grant_host_mapping);
- return GNTST_okay;
-
- out:
- gdprintk(XENLOG_INFO, "%s gpaddr 0x%lx mfn 0x%lx cur_pte "
- "0x%lx old_pte 0x%lx\n",
- __func__, gpaddr, mfn, pte_val(cur_pte), pte_val(old_pte));
- return GNTST_general_error;
-}
-
-// This heavily depends on the struct page layout.
-// gnttab_transfer() calls steal_page() with memflags = 0.
-//   For grant table transfer, we must fill the page.
-// memory_exchange() calls steal_page() with memflags = MEMF_no_refcount.
-//   For memory exchange, we don't have to fill the page because
-//   memory_exchange() does it.
-int
-steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
-{
-#if 0 /* if big endian */
-# error "implement big endian version of steal_page()"
-#endif
- unsigned long x, y;
-
- if (page_get_owner(page) != d) {
- gdprintk(XENLOG_INFO, "%s d 0x%p owner 0x%p\n",
- __func__, d, page_get_owner(page));
- return -1;
- }
-
- if (!(memflags & MEMF_no_refcount)) {
- unsigned long gpfn;
- struct page_info *new;
- unsigned long new_mfn;
- int ret;
-
- new = alloc_domheap_page(d, 0);
- if (new == NULL) {
- gdprintk(XENLOG_INFO, "alloc_domheap_page() failed\n");
- return -1;
- }
- // zero out pages for security reasons
- clear_page(page_to_virt(new));
- // assign_domain_page_cmpxchg_rel() has release semantics
- // so smp_mb() isn't needed.
-
- gpfn = get_gpfn_from_mfn(page_to_mfn(page));
- if (gpfn == INVALID_M2P_ENTRY) {
- free_domheap_page(new);
- return -1;
- }
- new_mfn = page_to_mfn(new);
- set_gpfn_from_mfn(new_mfn, gpfn);
- // smp_mb() isn't needed because assign_domain_page_cmpxchg_rel()
- // has release semantics.
-
- ret = assign_domain_page_cmpxchg_rel(d, gpfn << PAGE_SHIFT, page, new,
- ASSIGN_writable |
- ASSIGN_pgc_allocated, 0);
- if (ret < 0) {
- gdprintk(XENLOG_INFO, "assign_domain_page_cmpxchg_rel failed %d\n",
- ret);
- set_gpfn_from_mfn(new_mfn, INVALID_M2P_ENTRY);
- free_domheap_page(new);
- return -1;
- }
- perfc_incr(steal_page_refcount);
- }
-
- spin_lock(&d->page_alloc_lock);
- /* check again */
- if (is_xen_heap_page(page) || page_get_owner(page) != d) {
- goto fail;
- }
-
- /*
- * We require there is just one reference (PGC_allocated). We temporarily
- * drop this reference now so that we can safely swizzle the owner.
- */
- y = page->count_info;
- do {
- x = y;
-
- if (unlikely(((x & (PGC_count_mask | PGC_allocated)) !=
- (1 | PGC_allocated)))) {
- struct domain* nd = page_get_owner(page);
- if (nd == NULL) {
- gdprintk(XENLOG_INFO, "gnttab_transfer: "
- "Bad page %p: ed=%p(%u), "
- "sd=%p,"
- " caf=%016lx, taf=%" PRtype_info
- " memflags 0x%x\n",
- (void *) page_to_mfn(page),
- d, d->domain_id,
- nd,
- x,
- page->u.inuse.type_info,
- memflags);
- } else {
- gdprintk(XENLOG_WARNING, "gnttab_transfer: "
- "Bad page %p: ed=%p(%u), "
- "sd=%p(%u),"
- " caf=%016lx, taf=%" PRtype_info
- " memflags 0x%x\n",
- (void *) page_to_mfn(page),
- d, d->domain_id,
- nd, nd->domain_id,
- x,
- page->u.inuse.type_info,
- memflags);
- }
- goto fail;
- }
-
- y = cmpxchg(&page->count_info, x, x & ~PGC_count_mask);
- } while (unlikely(y != x));
-
- /* Swizzle the owner then reinstate the PGC_allocated reference. */
- page_set_owner(page, NULL);
- y = page->count_info;
- do {
- x = y;
- BUG_ON((x & (PGC_count_mask | PGC_allocated)) != PGC_allocated);
- y = cmpxchg(&page->count_info, x, x | 1);
- } while (unlikely(y != x));
-
- /* Unlink from original owner. */
- if ( !(memflags & MEMF_no_refcount) )
- d->tot_pages--;
- page_list_del(page, &d->page_list);
-
- spin_unlock(&d->page_alloc_lock);
- perfc_incr(steal_page);
- return 0;
-
- fail:
- spin_unlock(&d->page_alloc_lock);
- MEM_LOG("Bad page %p: ed=%p(%u), sd=%p, caf=%016lx, taf=%" PRtype_info,
- (void *)page_to_mfn(page), d, d->domain_id,
- page_get_owner(page), page->count_info, page->u.inuse.type_info);
- return -1;
-}
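-/*
- * Note (added for clarity): the two cmpxchg loops above perform a safe
- * owner change: first force the reference count to zero while keeping
- * PGC_allocated set, then swizzle the owner to NULL, then reinstate a
- * single reference. Any concurrent get_page() against the old owner
- * fails while the count is zero.
- */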
-
-int
-donate_page(struct domain *d, struct page_info *page, unsigned int memflags)
-{
- /* needs to be implemented for transcendent memory (tmem) */
- ASSERT(0);
- return -ENOSYS;
-}
-
-static void
-__guest_physmap_add_page(struct domain *d, unsigned long gpfn,
- unsigned long mfn)
-{
- set_gpfn_from_mfn(mfn, gpfn);
- smp_mb();
- assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn,
- ASSIGN_writable | ASSIGN_pgc_allocated);
- if ( iommu_enabled && need_iommu(d) ){
- int i, j;
- j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
- for(i = 0 ; i < j; i++)
- iommu_map_page(d, gpfn*j + i, mfn*j + i,
- IOMMUF_readable|IOMMUF_writable);
- }
-}
-
-int
-guest_physmap_add_page(struct domain *d, unsigned long gpfn,
- unsigned long mfn, unsigned int page_order)
-{
- unsigned long i;
-
- for (i = 0; i < (1UL << page_order); i++) {
- BUG_ON(!mfn_valid(mfn));
- BUG_ON((mfn_to_page(mfn)->count_info & ~PGC_xen_heap) !=
- (PGC_allocated | 1));
- __guest_physmap_add_page(d, gpfn, mfn);
- mfn++;
- gpfn++;
- }
-
- perfc_incr(guest_physmap_add_page);
- return 0;
-}
-
-void
-guest_physmap_remove_page(struct domain *d, unsigned long gpfn,
- unsigned long mfn, unsigned int page_order)
-{
- unsigned long i;
-
- BUG_ON(mfn == 0);//XXX
-
- for (i = 0; i < (1UL << page_order); i++)
- zap_domain_page_one(d, (gpfn+i) << PAGE_SHIFT, 0, mfn+i);
-
- perfc_incr(guest_physmap_remove_page);
-}
-
-static void
-domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
- volatile pte_t* ptep, pte_t old_pte,
- struct page_info* page)
-{
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
- struct tlb_track_entry* entry;
-#endif
-
- if (shadow_mode_enabled(d))
- shadow_mark_page_dirty(d, mpaddr >> PAGE_SHIFT);
-
-#ifndef CONFIG_XEN_IA64_TLB_TRACK
- // XXX sledgehammer; ideally we would flush a finer range.
- domain_flush_vtlb_all(d);
- put_page(page);
-#else
- switch (tlb_track_search_and_remove(d->arch.tlb_track,
- ptep, old_pte, &entry)) {
- case TLB_TRACK_NOT_TRACKED:
- // dprintk(XENLOG_WARNING, "%s TLB_TRACK_NOT_TRACKED\n", __func__);
- /* This page is zapped from this domain
-  * by memory decrease, memory exchange, or dom0vp_zap_physmap,
-  * i.e. the page is zapped to return it to xen
-  * (balloon driver or DMA page allocation), or because
-  * a foreign-domain-mapped page is unmapped from the domain.
-  * In the former case the page is to be freed, so
-  * we can defer the freeing and batch it.
-  * In the latter case the page is unmapped, so
-  * we need to flush it. But to optimize this, we
-  * queue the page and flush the vTLB only once.
-  * I.e. the caller must call dfree_flush() explicitly.
- */
- domain_flush_vtlb_all(d);
- put_page(page);
- break;
- case TLB_TRACK_NOT_FOUND:
- // dprintk(XENLOG_WARNING, "%s TLB_TRACK_NOT_FOUND\n", __func__);
- /* This page is zapped from this domain
-  * by grant table page unmap.
-  * Luckily the domain that mapped this page didn't
-  * access it, so we don't have to flush the vTLB.
-  * Probably the domain only did DMA.
-  */
- /* do nothing */
- put_page(page);
- break;
- case TLB_TRACK_FOUND:
- // dprintk(XENLOG_WARNING, "%s TLB_TRACK_FOUND\n", __func__);
- /* This page is zapped from this domain
-  * by grant table page unmap.
-  * Fortunately this page is accessed via only one virtual
-  * memory address, so it is easy to flush.
-  */
- domain_flush_vtlb_track_entry(d, entry);
- tlb_track_free_entry(d->arch.tlb_track, entry);
- put_page(page);
- break;
- case TLB_TRACK_MANY:
- gdprintk(XENLOG_INFO, "%s TLB_TRACK_MANY\n", __func__);
- /* This page is zapped from this domain
-  * by grant table page unmap.
-  * Unfortunately this page is accessed via many virtual
-  * memory addresses (or too many times via a single virtual address),
-  * so we gave up tracking the virtual addresses.
-  * A full vTLB flush is necessary.
-  */
- domain_flush_vtlb_all(d);
- put_page(page);
- break;
- case TLB_TRACK_AGAIN:
- gdprintk(XENLOG_ERR, "%s TLB_TRACK_AGAIN\n", __func__);
- BUG();
- break;
- }
-#endif
- perfc_incr(domain_page_flush_and_put);
-}
-
-int
-domain_page_mapped(struct domain* d, unsigned long mpaddr)
-{
- volatile pte_t * pte;
-
- pte = lookup_noalloc_domain_pte(d, mpaddr);
- if(pte != NULL && !pte_none(*pte))
- return 1;
- return 0;
-}
-
-/* Flush cache of domain d. */
-void domain_cache_flush (struct domain *d, int sync_only)
-{
- struct mm_struct *mm = &d->arch.mm;
- volatile pgd_t *pgd = mm->pgd;
- unsigned long maddr;
- int i,j,k, l;
- int nbr_page = 0;
- void (*flush_func)(unsigned long start, unsigned long end);
- extern void flush_dcache_range (unsigned long, unsigned long);
-
- if (sync_only)
- flush_func = &flush_icache_range;
- else
- flush_func = &flush_dcache_range;
-
- for (i = 0; i < PTRS_PER_PGD; pgd++, i++) {
- volatile pud_t *pud;
- if (!pgd_present(*pgd)) // acquire semantics
- continue;
- pud = pud_offset(pgd, 0);
- for (j = 0; j < PTRS_PER_PUD; pud++, j++) {
- volatile pmd_t *pmd;
- if (!pud_present(*pud)) // acquire semantics
- continue;
- pmd = pmd_offset(pud, 0);
- for (k = 0; k < PTRS_PER_PMD; pmd++, k++) {
- volatile pte_t *pte;
- if (!pmd_present(*pmd)) // acquire semantics
- continue;
- pte = pte_offset_map(pmd, 0);
- for (l = 0; l < PTRS_PER_PTE; pte++, l++) {
- if (!pte_present(*pte)) // acquire semantics
- continue;
- /* Convert PTE to maddr. */
- maddr = __va_ul (pte_val(*pte)
- & _PAGE_PPN_MASK);
- (*flush_func)(maddr, maddr+ PAGE_SIZE);
- nbr_page++;
- }
- }
- }
- }
- //printk ("domain_cache_flush: %d %d pages\n", d->domain_id, nbr_page);
-}
-
-static void free_page_type(struct page_info *page, unsigned long type)
-{
-}
-
-static int alloc_page_type(struct page_info *page, unsigned long type)
-{
- return 1;
-}
-
-void *pgtable_quicklist_alloc(void)
-{
- struct page_info *page;
- void *p;
-
- BUG_ON(dom_p2m == NULL);
- page = alloc_domheap_page(dom_p2m, 0);
- if (page == NULL)
- return NULL;
-
- p = page_to_virt(page);
- clear_page(p);
- return p;
-}
-
-void pgtable_quicklist_free(void *pgtable_entry)
-{
- struct page_info* page = virt_to_page(pgtable_entry);
-
- BUG_ON(page_get_owner(page) != dom_p2m);
- BUG_ON(page->count_info != (1 | PGC_allocated));
-
- put_page(page);
-}
-
-void put_page_type(struct page_info *page)
-{
- u64 nx, x, y = page->u.inuse.type_info;
-
- again:
- do {
- x = y;
- nx = x - 1;
-
- ASSERT((x & PGT_count_mask) != 0);
-
- /*
- * The page should always be validated while a reference is held. The
- * exception is during domain destruction, when we forcibly invalidate
- * page-table pages if we detect a referential loop.
- * See domain.c:relinquish_list().
- */
- ASSERT((x & PGT_validated) || page_get_owner(page)->is_dying);
-
- if ( unlikely((nx & PGT_count_mask) == 0) )
- {
- /* Record TLB information for flush later. Races are harmless. */
- page->tlbflush_timestamp = tlbflush_current_time();
-
- if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
- likely(nx & PGT_validated) )
- {
- /*
- * Page-table pages must be unvalidated when count is zero. The
- * 'free' is safe because the refcnt is non-zero and validated
- * bit is clear => other ops will spin or fail.
- */
- if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
- x & ~PGT_validated)) != x) )
- goto again;
- /* We cleared the 'valid bit' so we do the clean up. */
- free_page_type(page, x);
- /* Carry on, but with the 'valid bit' now clear. */
- x &= ~PGT_validated;
- nx &= ~PGT_validated;
- }
- }
- }
- while ( unlikely((y = cmpxchg_rel(&page->u.inuse.type_info, x, nx)) != x) );
-}
-
-
-static int get_page_from_pagenr(unsigned long page_nr, struct domain *d)
-{
- struct page_info *page = mfn_to_page(page_nr);
-
- if ( unlikely(!mfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
- {
- MEM_LOG("Could not get page ref for pfn %lx", page_nr);
- return 0;
- }
-
- return 1;
-}
-
-
-int get_page_type(struct page_info *page, unsigned long type)
-{
- u64 nx, x, y = page->u.inuse.type_info;
-
- ASSERT(!(type & ~PGT_type_mask));
-
- again:
- do {
- x = y;
- nx = x + 1;
- if ( unlikely((nx & PGT_count_mask) == 0) )
- {
- MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
- return 0;
- }
- else if ( unlikely((x & PGT_count_mask) == 0) )
- {
- if ( (x & PGT_type_mask) != type )
- {
- /*
- * On type change we check to flush stale TLB entries. This
- * may be unnecessary (e.g., page was GDT/LDT) but those
- * circumstances should be very rare.
- */
- cpumask_t mask;
-
- cpumask_copy(&mask, page_get_owner(page)->domain_dirty_cpumask);
- tlbflush_filter(mask, page->tlbflush_timestamp);
-
- if ( unlikely(!cpumask_empty(&mask)) )
- {
- perfc_incr(need_flush_tlb_flush);
- flush_tlb_mask(&mask);
- }
-
- /* We lose existing type, back pointer, and validity. */
- nx &= ~(PGT_type_mask | PGT_validated);
- nx |= type;
-
- /* No special validation needed for writable pages. */
- /* Page tables and GDT/LDT need to be scanned for validity. */
- if ( type == PGT_writable_page )
- nx |= PGT_validated;
- }
- }
- else if ( unlikely((x & PGT_type_mask) != type) )
- {
- if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
- (type != PGT_l1_page_table) )
- MEM_LOG("Bad type (saw %08lx != exp %08lx) "
- "for mfn %016lx (pfn %016lx)",
- x, type, page_to_mfn(page),
- get_gpfn_from_mfn(page_to_mfn(page)));
- return 0;
- }
- else if ( unlikely(!(x & PGT_validated)) )
- {
- /* Someone else is updating validation of this page. Wait... */
- while ( (y = page->u.inuse.type_info) == x )
- cpu_relax();
- goto again;
- }
- }
- while ( unlikely((y = cmpxchg_acq(&page->u.inuse.type_info, x, nx)) != x) );
-
- if ( unlikely(!(nx & PGT_validated)) )
- {
- /* Try to validate page type; drop the new reference on failure. */
- if ( unlikely(!alloc_page_type(page, type)) )
- {
- MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %08lx"
- ": caf=%016lx taf=%" PRtype_info,
- page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
- type, page->count_info, page->u.inuse.type_info);
- /* No one else can get a reference. We hold the only ref. */
- page->u.inuse.type_info = 0;
- return 0;
- }
-
- /* No one else is updating simultaneously. */
- __set_bit(_PGT_validated, &page->u.inuse.type_info);
- }
-
- return 1;
-}
-
-int page_is_ram_type(unsigned long mfn, unsigned long type)
-{
- u32 mem_type = efi_mem_type(pfn_to_paddr(mfn));
-
- if (type & RAM_TYPE_CONVENTIONAL)
- {
- switch (mem_type)
- {
- case EFI_BOOT_SERVICES_CODE:
- case EFI_BOOT_SERVICES_DATA:
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_CONVENTIONAL_MEMORY:
- return 1;
- default:
- break;
- }
- }
- if (type & RAM_TYPE_RESERVED)
- {
- switch (mem_type)
- {
- case EFI_RUNTIME_SERVICES_CODE:
- case EFI_RUNTIME_SERVICES_DATA:
- case EFI_RESERVED_TYPE:
- case EFI_MEMORY_MAPPED_IO:
- case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
- case EFI_PAL_CODE:
- return 1;
- default:
- break;
- }
- }
- if (type & RAM_TYPE_ACPI)
- {
- switch (mem_type)
- {
- case EFI_ACPI_RECLAIM_MEMORY:
- case EFI_ACPI_MEMORY_NVS:
- return 1;
- default:
- break;
- }
- }
- else if (type & RAM_TYPE_UNUSABLE)
- {
- return (mem_type == EFI_UNUSABLE_MEMORY);
- }
-
- return 0;
-}
-
-
-long
-arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
-{
- struct page_info *page = NULL;
- long rc;
-
- switch (op) {
- case XENMEM_add_to_physmap:
- {
- struct xen_add_to_physmap xatp;
- unsigned long prev_mfn, mfn = 0, gpfn;
- struct domain *d;
-
- if (copy_from_guest(&xatp, arg, 1))
- return -EFAULT;
-
- rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
- if (rc)
- return rc;
-
- /* This hypercall is used for VT-i domains only. */
- if (!is_hvm_domain(d)) {
- rcu_unlock_domain(d);
- return -ENOSYS;
- }
-
- switch (xatp.space) {
- case XENMAPSPACE_shared_info:
- if (xatp.idx == 0)
- mfn = virt_to_mfn(d->shared_info);
- break;
- case XENMAPSPACE_grant_table:
- spin_lock(&d->grant_table->lock);
-
- if (d->grant_table->gt_version == 0)
- d->grant_table->gt_version = 1;
-
- if (d->grant_table->gt_version == 2 &&
- (xatp.idx & XENMAPIDX_grant_table_status))
- {
- xatp.idx &= ~XENMAPIDX_grant_table_status;
- if (xatp.idx < nr_status_frames(d->grant_table))
- mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
- }
- else
- {
- if ((xatp.idx >= nr_grant_frames(d->grant_table)) &&
- (xatp.idx < max_nr_grant_frames))
- gnttab_grow_table(d, xatp.idx + 1);
-
- if (xatp.idx < nr_grant_frames(d->grant_table))
- mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
- }
-
- spin_unlock(&d->grant_table->lock);
- break;
- case XENMAPSPACE_gmfn: {
- struct xen_ia64_memmap_info memmap_info;
- efi_memory_desc_t md;
- int ret;
-
- xatp.idx = gmfn_to_mfn(d, xatp.idx);
- if ( !get_page_from_pagenr(xatp.idx, d) )
- break;
-
- mfn = xatp.idx;
- page = mfn_to_page(mfn);
-
- memmap_info.efi_memmap_size = sizeof(md);
- memmap_info.efi_memdesc_size = sizeof(md);
- memmap_info.efi_memdesc_version =
- EFI_MEMORY_DESCRIPTOR_VERSION;
-
- md.type = EFI_CONVENTIONAL_MEMORY;
- md.pad = 0;
- md.phys_addr = xatp.gpfn << PAGE_SHIFT;
- md.virt_addr = 0;
- md.num_pages = 1UL << (PAGE_SHIFT - EFI_PAGE_SHIFT);
- md.attribute = EFI_MEMORY_WB;
-
- ret = __dom0vp_add_memdesc(d, &memmap_info, (char*)&md);
- if (ret != 0) {
- put_page(page);
- rcu_unlock_domain(d);
- gdprintk(XENLOG_DEBUG,
- "%s:%d td %d gpfn 0x%lx mfn 0x%lx ret %d\n",
- __func__, __LINE__,
- d->domain_id, xatp.gpfn, xatp.idx, ret);
- return ret;
- }
- break;
- }
- default:
- break;
- }
-
- if (mfn == 0) {
- if ( page )
- put_page(page);
- rcu_unlock_domain(d);
- return -EINVAL;
- }
-
- domain_lock(d);
-
- /* Check whether remapping is actually necessary. */
- prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
- if (mfn == prev_mfn)
- goto out;
-
- /* Remove previously mapped page if it was present. */
- if (prev_mfn && mfn_valid(prev_mfn)) {
- if (is_xen_heap_mfn(prev_mfn))
- /* Xen heap frames are simply unhooked from this phys slot. */
- guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
- else
- /* Normal domain memory is freed, to avoid leaking memory. */
- guest_remove_page(d, xatp.gpfn);
- }
-
- /* Unmap from old location, if any. */
- gpfn = get_gpfn_from_mfn(mfn);
- if (gpfn != INVALID_M2P_ENTRY)
- guest_physmap_remove_page(d, gpfn, mfn, 0);
-
- /* Map at new location. */
- /* Here page->count_info = PGC_allocated | N where N >= 1 */
- __guest_physmap_add_page(d, xatp.gpfn, mfn);
-
- out:
- domain_unlock(d);
-
- if ( page )
- put_page(page);
-
- rcu_unlock_domain(d);
-
- break;
- }
-
- case XENMEM_machine_memory_map:
- {
- struct xen_memory_map memmap;
- struct xen_ia64_memmap_info memmap_info;
- XEN_GUEST_HANDLE(char) buffer;
-
- if (!IS_PRIV(current->domain))
- return -EINVAL;
- if (copy_from_guest(&memmap, arg, 1))
- return -EFAULT;
- if (memmap.nr_entries <
- sizeof(memmap_info) + ia64_boot_param->efi_memmap_size)
- return -EINVAL;
-
- memmap.nr_entries =
- sizeof(memmap_info) + ia64_boot_param->efi_memmap_size;
- memset(&memmap_info, 0, sizeof(memmap_info));
- memmap_info.efi_memmap_size = ia64_boot_param->efi_memmap_size;
- memmap_info.efi_memdesc_size = ia64_boot_param->efi_memdesc_size;
- memmap_info.efi_memdesc_version = ia64_boot_param->efi_memdesc_version;
-
- buffer = guest_handle_cast(memmap.buffer, char);
- if (copy_to_guest(buffer, (char*)&memmap_info, sizeof(memmap_info)) ||
- copy_to_guest_offset(buffer, sizeof(memmap_info),
- (char*)__va(ia64_boot_param->efi_memmap),
- ia64_boot_param->efi_memmap_size) ||
- copy_to_guest(arg, &memmap, 1))
- return -EFAULT;
- return 0;
- }
-
- case XENMEM_get_pod_target:
- case XENMEM_set_pod_target: {
- /* XXX: PoD (populate-on-demand) isn't supported yet. */
- xen_pod_target_t target;
- struct domain *d;
-
- /* Support DOMID_SELF? */
- if ( !IS_PRIV(current->domain) )
- return -EINVAL;
-
- if ( copy_from_guest(&target, arg, 1) )
- return -EFAULT;
-
- rc = rcu_lock_target_domain_by_id(target.domid, &d);
- if ( rc != 0 )
- return rc;
-
- if ( op == XENMEM_set_pod_target )
- rc = xsm_set_pod_target(d);
- else
- rc = xsm_get_pod_target(d);
-
- if ( rc != 0 )
- goto pod_target_out_unlock;
-
- if ( op == XENMEM_set_pod_target )
- {
- /* If -ENOSYS were returned here, the domain builder
- would abort domain creation. */
- /* rc = -ENOSYS; */
- }
-
- target.tot_pages = d->tot_pages;
- target.pod_cache_pages = 0;
- target.pod_entries = 0;
-
- if ( copy_to_guest(arg, &target, 1) )
- {
- rc = -EFAULT;
- goto pod_target_out_unlock;
- }
-
- pod_target_out_unlock:
- rcu_unlock_domain(d);
- return rc;
- }
-
- default:
- return -ENOSYS;
- }
-
- return 0;
-}
-
-int is_iomem_page(unsigned long mfn)
-{
- return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
-}
-
-static void __xencomm_mark_dirty(struct domain *d,
- unsigned long addr, unsigned int len)
-{
- unsigned long gpfn;
- unsigned long end_addr = addr + len;
-
- if (shadow_mode_enabled(d)) {
- for (addr &= PAGE_MASK; addr < end_addr; addr += PAGE_SIZE) {
- gpfn = get_gpfn_from_mfn(virt_to_mfn(addr));
- shadow_mark_page_dirty(d, gpfn);
- }
- }
-}
-
-void xencomm_mark_dirty(unsigned long addr, unsigned int len)
-{
- __xencomm_mark_dirty(current->domain, addr, len);
-}
-
-/* stubs for populate on demand */
-int
-guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
- unsigned int order)
-{
- gdprintk(XENLOG_WARNING, "populate on demand isn't supported yet\n");
- return -ENOSYS;
-}
-
-int
-p2m_pod_decrease_reservation(struct domain *d, xen_pfn_t gpfn,
- unsigned int order)
-{
- gdprintk(XENLOG_WARNING, "populate on demand isn't supported yet\n");
- return 0;
-}
-
-/* Simple no-op */
-void arch_dump_shared_mem_info(void)
-{
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
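(An aside on the pair above: get_page_type() and put_page_type() are the classic lock-free "refcount plus flags in one word" pattern — read the word, compute the successor, and retry a compare-and-swap until it lands, with acquire semantics on get and release semantics on put. Below is a minimal, self-contained sketch of the count half of that loop, using C11 atomics in place of Xen's cmpxchg_acq/cmpxchg_rel; the T_COUNT_MASK layout and the function names are invented for illustration, not Xen's.)

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define T_COUNT_MASK 0x0000ffffu      /* illustrative layout, not Xen's */

    /* One word holds the refcount (and, in Xen, type/validated flags too);
     * CAS retry loops keep every update atomic with respect to the rest. */
    static _Atomic uint32_t type_info;

    static int get_type_ref(void)
    {
        uint32_t x = atomic_load(&type_info), nx;
        do {
            nx = x + 1;
            if ((nx & T_COUNT_MASK) == 0) /* count would overflow: refuse */
                return 0;
            /* acquire: a successful get observes the validated contents */
        } while (!atomic_compare_exchange_weak_explicit(&type_info, &x, nx,
                     memory_order_acquire, memory_order_relaxed));
        return 1;
    }

    static void put_type_ref(void)
    {
        uint32_t x = atomic_load(&type_info), nx;
        do {
            assert((x & T_COUNT_MASK) != 0);
            nx = x - 1;
            /* release: our writes are visible before the count drops */
        } while (!atomic_compare_exchange_weak_explicit(&type_info, &x, nx,
                     memory_order_release, memory_order_relaxed));
    }

    int main(void)
    {
        if (get_type_ref())
            put_type_ref();
        return 0;
    }

The acquire/release split mirrors cmpxchg_acq in get_page_type() and cmpxchg_rel in put_page_type() above; the goto-again path in the real code handles the validated-bit transitions that this sketch omits.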
diff --git a/xen/arch/ia64/xen/mm_init.c b/xen/arch/ia64/xen/mm_init.c
deleted file mode 100644
index ef6f2bfb58..0000000000
--- a/xen/arch/ia64/xen/mm_init.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Initialize MMU support.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <xen/sched.h>
-#include <asm/vhpt.h>
-#include <asm/xenmca.h>
-#include <asm/meminit.h>
-#include <asm/page.h>
-
-struct ia64_mca_tlb_info ia64_mca_tlb_list[NR_CPUS];
-
-extern void ia64_tlb_init (void);
-
-#ifdef XEN
-cpumask_t percpu_set;
-#endif
-
-void __devinit
-ia64_mmu_init (void *my_cpu_data)
-{
- unsigned long psr, impl_va_bits;
- extern void __devinit tlb_init (void);
- int cpu = smp_processor_id();
-
- /* Pin mapping for percpu area into TLB */
- psr = ia64_clear_ic();
- ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
- pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
- PERCPU_PAGE_SHIFT);
-
- ia64_set_psr(psr);
- ia64_srlz_i();
-#ifdef XEN
- cpumask_set_cpu(cpu, &percpu_set);
-#endif
-
- /*
- * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
- * address space. The IA-64 architecture guarantees that at least 50 bits of
- * virtual address space are implemented but if we pick a large enough page size
- * (e.g., 64KB), the mapped address space is big enough that it will overlap with
- * VMLPT. I assume that once we run on machines big enough to warrant 64KB pages,
- * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
- * problem in practice. Alternatively, we could truncate the top of the mapped
- * address space to not permit mappings that would overlap with the VMLPT.
- * --davidm 00/12/06
- */
-# define pte_bits 3
-# define mapped_space_bits (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
- /*
- * The virtual page table has to cover the entire implemented address space within
- * a region even though not all of this space may be mappable. The reason for
- * this is that the Access bit and Dirty bit fault handlers perform
- * non-speculative accesses to the virtual page table, so the address range of the
- * virtual page table itself needs to be covered by the virtual page table.
- */
-# define vmlpt_bits (impl_va_bits - PAGE_SHIFT + pte_bits)
-# define POW2(n) (1ULL << (n))
-
- impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));
-
- if (impl_va_bits < 51 || impl_va_bits > 61)
- panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
-
-#ifdef XEN
- vhpt_init();
-#endif
- ia64_tlb_init();
-
-#ifdef CONFIG_HUGETLB_PAGE
- ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
- ia64_srlz_d();
-#endif
-
- /* mca handler uses cr.lid as key to pick the right entry */
- ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);
-
- /* insert this percpu data information into our list for MCA recovery purposes */
-#ifdef XEN
- ia64_mca_tlb_list[cpu].percpu_paddr = __pa(my_cpu_data);
-#else
- ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
- /* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
- ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
- ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
- ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
- ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
- ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
-#endif
-}
-
-void __init
-mem_init (void)
-{
-#ifdef CONFIG_PCI
- /*
- * This needs to be called _after_ the command line has been parsed but _before_
- * any drivers that may need the PCI DMA interface are initialized or bootmem has
- * been freed.
- */
- platform_dma_init();
-#endif
-
-}
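(To make the VMLPT sizing above concrete, here is the same arithmetic as a tiny standalone program. The PAGE_SHIFT = 14 (16 KB pages) and impl_va_bits = 50 inputs are assumptions chosen for illustration; the real values depend on the build's page size and the CPU's implemented VA width.)

    #include <stdio.h>

    #define PAGE_SHIFT   14   /* assumed: 16 KB pages */
    #define PTE_BITS     3    /* 8-byte PTEs */
    #define IMPL_VA_BITS 50   /* assumed: the architectural minimum */

    int main(void)
    {
        /* Three page-table levels, each indexed by (PAGE_SHIFT - PTE_BITS)
         * bits, on top of PAGE_SHIFT offset bits: 3*(14-3) + 14 = 47. */
        int mapped_space_bits = 3 * (PAGE_SHIFT - PTE_BITS) + PAGE_SHIFT;

        /* One 8-byte PTE per virtual page of implemented address space:
         * 50 - 14 + 3 = 39, so the VMLPT spans 2^39 bytes per region. */
        int vmlpt_bits = IMPL_VA_BITS - PAGE_SHIFT + PTE_BITS;

        printf("mapped_space_bits=%d vmlpt_bits=%d\n",
               mapped_space_bits, vmlpt_bits);
        return 0;
    }

With these inputs the 2^47-byte mapped space still fits under the 2^50-byte implemented region alongside the 2^39-byte VMLPT — the headroom the comment above says shrinks with larger page sizes.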
diff --git a/xen/arch/ia64/xen/oprofile/Makefile b/xen/arch/ia64/xen/oprofile/Makefile
deleted file mode 100644
index 826b99b756..0000000000
--- a/xen/arch/ia64/xen/oprofile/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += perfmon.o xenoprof.o
diff --git a/xen/arch/ia64/xen/oprofile/perfmon.c b/xen/arch/ia64/xen/oprofile/perfmon.c
deleted file mode 100644
index ada2b2c488..0000000000
--- a/xen/arch/ia64/xen/oprofile/perfmon.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/******************************************************************************
- * perfmon.c for xenoprof
- * This is based on linux/arch/ia64/oprofile/perfmon.c, but heavily rewritten.
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-/**
- * @file perfmon.c
- *
- * @remark Copyright 2003 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <xen/event.h>
-#include <xen/xenoprof.h>
-#include <asm/perfmon.h>
-#include <asm/ptrace.h>
-#include <asm/vmx.h> /* for vmx_user_mode() */
-
- // XXX: move this declaration to an appropriate header file
-extern int is_active(struct domain *d);
-
-static int allow_virq;
-static int allow_ints;
-
-static int
-xenoprof_is_xen_mode(struct vcpu *v, struct pt_regs *regs)
-{
- if (VMX_DOMAIN(v))
- return !vmx_user_mode(regs);
- return ring_0(regs);
-}
-
-static int
-xenoprof_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
- struct pt_regs *regs, unsigned long stamp)
-{
- unsigned long ip = profile_pc(regs);
- int event = arg->pmd_eventid;
- struct vcpu *v = current;
- int mode = xenoprofile_get_mode(v, regs);
-
- // see pfm_do_interrupt_handler() in xen/arch/ia64/linux-xen/perfmon.c.
- // It always passes task as NULL; this is a workaround.
- BUG_ON(task != NULL);
-
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
- if (!allow_virq || !allow_ints)
- return 0;
-
- // Note that xenoprof_log_event() actually expects cpu_user_regs; cast
- // back appropriately when implementing backtraces on ia64.
- xenoprof_log_event(v, regs, ip, mode, event);
- // send VIRQ_XENOPROF
- if (is_active(v->domain) && !xenoprof_is_xen_mode(v, regs) &&
- !is_idle_vcpu(v))
- send_guest_vcpu_virq(v, VIRQ_XENOPROF);
-
- return 0;
-}
-
- // Same as Linux's OPROFILE_FMT_UUID.
-#define XENOPROF_FMT_UUID { \
- 0x77, 0x7a, 0x6e, 0x61, 0x20, 0x65, 0x73, 0x69, 0x74, 0x6e, 0x72, 0x20, 0x61, 0x65, 0x0a, 0x6c }
-
-static pfm_buffer_fmt_t xenoprof_fmt = {
- .fmt_name = "xenoprof_format",
- .fmt_uuid = XENOPROF_FMT_UUID,
- .fmt_handler = xenoprof_handler,
-};
-
-static char * get_cpu_type(void)
-{
- __u8 family = local_cpu_data->family;
-
- switch (family) {
- case 0x07:
- return "ia64/itanium";
- case 0x1f:
- return "ia64/itanium2";
- default:
- return "ia64/ia64";
- }
-}
-
-static int using_xenoprof;
-
-int __init
-xenprof_perfmon_init(void)
-{
- int ret = pfm_register_buffer_fmt(&xenoprof_fmt);
- if (ret)
- return -ENODEV;
- using_xenoprof = 1;
- printk("xenoprof: using perfmon.\n");
- return 0;
-}
-__initcall(xenprof_perfmon_init);
-
-#ifdef notyet
-void xenoprof_perfmon_exit(void)
-{
- if (!using_xenoprof)
- return;
-
- pfm_unregister_buffer_fmt(xenoprof_fmt.fmt_uuid);
-}
-__exitcall(xenoprof_perfmon_exit);
-#endif
-
-///////////////////////////////////////////////////////////////////////////
-// glue methods for xenoprof and perfmon.
-int
-xenoprof_arch_init(int *num_events, char *cpu_type)
-{
- *num_events = 0;
- strlcpy(cpu_type, get_cpu_type(), XENOPROF_CPU_TYPE_SIZE);
- return 0;
-}
-
-int
-xenoprof_arch_reserve_counters(void)
-{
- // perfmon takes care of this
- return 0;
-}
-
-int
-xenoprof_arch_counter(XEN_GUEST_HANDLE(void) arg)
-{
- return -ENOSYS;
-}
-
-int
-xenoprof_arch_setup_events(void)
-{
- // perfmon takes care of this
- return 0;
-}
-
-//XXX SMP: sync by IPI?
-int
-xenoprof_arch_enable_virq(void)
-{
- allow_virq = 1;
- return 0;
-}
-
-//XXX SMP: sync by IPI?
-int
-xenoprof_arch_start(void)
-{
- allow_ints = 1;
- return 0;
-}
-
-//XXX SMP: sync by IPI?
-void
-xenoprof_arch_stop(void)
-{
- allow_ints = 0;
-}
-
-//XXX SMP: sync by IPI?
-void
-xenoprof_arch_disable_virq(void)
-{
- allow_virq = 0;
-}
-
-void
-xenoprof_arch_release_counters(void)
-{
- // perfmon takes care of this
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
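(The glue above hangs on perfmon's pluggable "buffer format": xenoprof registers a descriptor carrying a name, a UUID and an overflow handler, and perfmon invokes the handler on every counter overflow. Below is a minimal sketch of that register-then-callback shape; the types and function names are invented stand-ins, not perfmon's real pfm_buffer_fmt_t API.)

    #include <stddef.h>
    #include <stdio.h>

    typedef int (*ovfl_handler_t)(void *ctx, unsigned long ip, int event);

    struct buffer_fmt {
        const char        *name;
        unsigned char      uuid[16];
        ovfl_handler_t     handler;
        struct buffer_fmt *next;
    };

    static struct buffer_fmt *fmt_list;

    static int register_buffer_fmt(struct buffer_fmt *fmt)
    {
        if (!fmt || !fmt->handler)
            return -1;
        fmt->next = fmt_list;           /* no locking shown in this sketch */
        fmt_list = fmt;
        return 0;
    }

    /* Called by the sampling core on each counter overflow. */
    static void deliver_overflow(void *ctx, unsigned long ip, int event)
    {
        for (struct buffer_fmt *f = fmt_list; f; f = f->next)
            f->handler(ctx, ip, event);
    }

    static int print_handler(void *ctx, unsigned long ip, int event)
    {
        (void)ctx;
        printf("overflow at %#lx, event %d\n", ip, event);
        return 0;
    }

    int main(void)
    {
        static struct buffer_fmt fmt = { "demo", {0}, print_handler, NULL };
        register_buffer_fmt(&fmt);
        deliver_overflow(NULL, 0x4000UL, 7);
        return 0;
    }

In the real file the handler additionally sets reset_ovfl_pmds so perfmon rearms the overflowed counters, and gates delivery on the allow_virq/allow_ints flags.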
diff --git a/xen/arch/ia64/xen/oprofile/xenoprof.c b/xen/arch/ia64/xen/oprofile/xenoprof.c
deleted file mode 100644
index 8635c7e676..0000000000
--- a/xen/arch/ia64/xen/oprofile/xenoprof.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/******************************************************************************
- * xenoprof.c
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <public/xen.h>
-#include <xen/xenoprof.h>
-#include <asm/vmx.h> /* for vmx_user_mode() */
-
-int
-xenoprofile_get_mode(const struct vcpu *v, const struct cpu_user_regs *regs)
-{
- int mode;
-
- /*
- * mode
- * 0: user, 1: kernel, 2: xen
- * see linux/driver/oprofile/cpu_buffer.h
- */
-#define CPU_MODE_USER 0
-#define CPU_MODE_KERNEL 1
-#define CPU_MODE_XEN 2
- if (VMX_DOMAIN(v)) {
- if (vmx_user_mode(regs)) {
- switch (ring(regs)) {
- case 3:
- mode = CPU_MODE_USER;
- break;
- case 0:
- mode = CPU_MODE_KERNEL;
- break;
- /* case 1: case 2: */
- default:
- gdprintk(XENLOG_ERR, "%s:%d ring%d in vmx is used!\n",
- __func__, __LINE__, ring(regs));
- mode = CPU_MODE_KERNEL; /* fall back to kernel mode. */
- break;
- }
- } else {
- mode = CPU_MODE_XEN;
- BUG_ON(ring(regs) != 0);
- }
- } else {
- switch (ring(regs)) {
- case 3:
- mode = CPU_MODE_USER;
- break;
- case CONFIG_CPL0_EMUL:
- mode = CPU_MODE_KERNEL;
- break;
- case 0:
- mode = CPU_MODE_XEN;
- break;
- default:
- gdprintk(XENLOG_ERR, "%s:%d ring%d in pv is used!\n", __func__,
- __LINE__, ring(regs));
- mode = CPU_MODE_KERNEL; /* fall back to kernel mode. */
- break;
- }
- }
- return mode;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/ia64/xen/pcdp.c b/xen/arch/ia64/xen/pcdp.c
deleted file mode 100644
index 102f2c4e3e..0000000000
--- a/xen/arch/ia64/xen/pcdp.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Parse the EFI PCDP table to locate the console device.
- *
- * (c) Copyright 2002, 2003, 2004 Hewlett-Packard Development Company, L.P.
- * Khalid Aziz <khalid.aziz@hp.com>
- * Alex Williamson <alex.williamson@hp.com>
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/acpi.h>
-#include <linux/console.h>
-#include <linux/efi.h>
-#include <linux/serial.h>
-#ifdef XEN
-#include <linux/efi.h>
-#include <linux/errno.h>
-#include <asm/io.h>
-#include <asm/iosapic.h>
-#include <asm/system.h>
-#include <acpi/acpi.h>
-#endif
-#include "pcdp.h"
-
-#ifdef XEN
-extern struct ns16550_defaults ns16550_com1;
-extern unsigned int ns16550_com1_gsi;
-extern unsigned int ns16550_com1_polarity;
-extern unsigned int ns16550_com1_trigger;
-
-/*
- * This is kind of ugly, but older rev HCDP tables don't provide interrupt
- * polarity and trigger information. Linux/ia64 discovers these properties
- * later via ACPI names, but we don't have that luxury in Xen/ia64. Since
- * all future platforms should have newer PCDP tables, this should be a
- * fixed list of boxes in the field, so we can hardcode based on the model.
- */
-static void __init
-pcdp_hp_irq_fixup(struct pcdp *pcdp, struct pcdp_uart *uart)
-{
- efi_system_table_t *systab;
- efi_config_table_t *tables;
- struct acpi_table_rsdp *rsdp = NULL;
- struct acpi_table_xsdt *xsdt;
- struct acpi_table_header *hdr;
- int i;
-
- if (pcdp->rev >= 3 || strcmp((char *)pcdp->oemid, "HP"))
- return;
-
- /*
- * Manually walk firmware provided tables to get to the XSDT.
- * The OEM table ID on the XSDT is the platform model string.
- * We only care about ACPI 2.0 tables as that's all HP provides.
- */
- systab = __va(ia64_boot_param->efi_systab);
-
- if (!systab || systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
- return;
-
- tables = __va(systab->tables);
-
- for (i = 0 ; i < (int)systab->nr_tables && !rsdp ; i++) {
- if (efi_guidcmp(tables[i].guid, ACPI_20_TABLE_GUID) == 0)
- rsdp =
- (struct acpi_table_rsdp *)__va(tables[i].table);
- }
-
- if (!rsdp ||
- strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1))
- return;
-
- xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
- hdr = &xsdt->header;
-
- if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1))
- return;
-
- /* Sanity check; are we still looking at HP firmware tables? */
- if (strcmp(hdr->oem_id, "HP"))
- return;
-
- if (!strcmp(hdr->oem_table_id, "zx2000") ||
- !strcmp(hdr->oem_table_id, "zx6000") ||
- !strcmp(hdr->oem_table_id, "rx2600") ||
- !strcmp(hdr->oem_table_id, "cx2600")) {
-
- ns16550_com1.irq = ns16550_com1_gsi = uart->gsi;
- ns16550_com1_polarity = IOSAPIC_POL_HIGH;
- ns16550_com1_trigger = IOSAPIC_EDGE;
-
- } else if (!strcmp(hdr->oem_table_id, "rx2620") ||
- !strcmp(hdr->oem_table_id, "cx2620") ||
- !strcmp(hdr->oem_table_id, "rx1600") ||
- !strcmp(hdr->oem_table_id, "rx1620")) {
-
- ns16550_com1.irq = ns16550_com1_gsi = uart->gsi;
- ns16550_com1_polarity = IOSAPIC_POL_LOW;
- ns16550_com1_trigger = IOSAPIC_LEVEL;
- }
-}
-
-static void __init
-setup_pcdp_irq(struct pcdp *pcdp, struct pcdp_uart *uart)
-{
- /* PCDP provides full interrupt info */
- if (pcdp->rev >= 3) {
- if (uart->flags & PCDP_UART_IRQ) {
- ns16550_com1.irq = ns16550_com1_gsi = uart->gsi;
- ns16550_com1_polarity =
- uart->flags & PCDP_UART_ACTIVE_LOW ?
- IOSAPIC_POL_LOW : IOSAPIC_POL_HIGH;
- ns16550_com1_trigger =
- uart->flags & PCDP_UART_EDGE_SENSITIVE ?
- IOSAPIC_EDGE : IOSAPIC_LEVEL;
- }
- return;
- }
-
- /* HCDP support */
- if (uart->pci_func & PCDP_UART_IRQ) {
- /*
- * HCDP tables don't provide interrupt polarity/trigger
- * info. If the UART is a PCI device, we know to program
- * it as low/level. Otherwise rely on platform hacks or
- * default to polling (irq = 0).
- */
- if (uart->pci_func & PCDP_UART_PCI) {
- ns16550_com1.irq = ns16550_com1_gsi = uart->gsi;
- ns16550_com1_polarity = IOSAPIC_POL_LOW;
- ns16550_com1_trigger = IOSAPIC_LEVEL;
- } else if (!strcmp((char *)pcdp->oemid, "HP"))
- pcdp_hp_irq_fixup(pcdp, uart);
- }
-}
-
-static int __init
-setup_serial_console(struct pcdp_uart *uart)
-{
- ns16550_com1.baud = uart->baud ? uart->baud : BAUD_AUTO;
- ns16550_com1.io_base = uart->addr.address;
- if (uart->bits)
- ns16550_com1.data_bits = uart->bits;
-
-#ifndef XEN
- setup_pcdp_irq(efi.hcdp, uart);
-
- /* Hide the HCDP table from dom0, xencons will be the console */
- efi.hcdp = NULL;
-#else
- setup_pcdp_irq(__va(efi.hcdp), uart);
-
- /* Hide the HCDP table from dom0, xencons will be the console */
- efi.hcdp = EFI_INVALID_TABLE_ADDR;
-#endif
-
- return 0;
-}
-
-static int __init
-setup_vga_console(struct pcdp_vga *vga)
-{
-#ifdef CONFIG_VGA
- /*
- * There was no console= in the original cmdline, and the PCDP
- * is telling us VGA is the primary console. We can call
- * cmdline_parse() manually to make things appear automagic.
- *
- * NB - cmdline_parse() expects the first part of the cmdline
- * to be the image name. So "pcdp" below is just filler.
- */
- char *console_cmdline = "pcdp console=vga";
-
- cmdline_parse(console_cmdline);
-
- /*
- * Leave efi.hcdp intact since dom0 will take ownership.
- * vga=keep is handled in start_kernel().
- */
-
- return 0;
-#else
- return -ENODEV;
-#endif
-}
-
-#else /* XEN */
-
-static int __init
-setup_serial_console(struct pcdp_uart *uart)
-{
-#ifdef CONFIG_SERIAL_8250_CONSOLE
- int mmio;
- static char options[64];
-
- mmio = (uart->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
- snprintf(options, sizeof(options), "console=uart,%s,0x%lx,%lun%d",
- mmio ? "mmio" : "io", uart->addr.address, uart->baud,
- uart->bits ? uart->bits : 8);
-
- return early_serial_console_init(options);
-#else
- return -ENODEV;
-#endif
-}
-
-static int __init
-setup_vga_console(struct pcdp_vga *vga)
-{
-#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
- if (efi_mem_type(0xA0000) == EFI_CONVENTIONAL_MEMORY) {
- printk(KERN_ERR "PCDP: VGA selected, but frame buffer is not MMIO!\n");
- return -ENODEV;
- }
-
- conswitchp = &vga_con;
- printk(KERN_INFO "PCDP: VGA console\n");
- return 0;
-#else
- return -ENODEV;
-#endif
-}
-#endif /* XEN */
-
-int __init
-efi_setup_pcdp_console(char *cmdline)
-{
- struct pcdp *pcdp;
- struct pcdp_uart *uart;
- struct pcdp_device *dev, *end;
- int i, serial = 0;
-
-#ifndef XEN
- pcdp = efi.hcdp;
- if (!pcdp)
- return -ENODEV;
-#else
- if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
- return -ENODEV;
- pcdp = __va(efi.hcdp);
-#endif
-
- printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, __pa(pcdp));
-
- if (strstr(cmdline, "console=hcdp")) {
- if (pcdp->rev < 3)
- serial = 1;
- } else if (strstr(cmdline, "console=")) {
- printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n");
- return -ENODEV;
- }
-
- if (pcdp->rev < 3 && efi_uart_console_only())
- serial = 1;
-
- for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) {
- if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) {
- if (uart->type == PCDP_CONSOLE_UART) {
- return setup_serial_console(uart);
-
- }
- }
- }
-
- end = (struct pcdp_device *) ((u8 *) pcdp + pcdp->length);
- for (dev = (struct pcdp_device *) (pcdp->uart + pcdp->num_uarts);
- dev < end;
- dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
- if (dev->flags & PCDP_PRIMARY_CONSOLE) {
- if (dev->type == PCDP_CONSOLE_VGA) {
- return setup_vga_console((struct pcdp_vga *) dev);
- }
- }
- }
-
- return -ENODEV;
-}
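(The loops at the end of efi_setup_pcdp_console() are the standard walk over variable-length firmware records: fixed-size UART entries first, then a stream of device entries that each carry their own length field. Below is a self-contained sketch of that second walk; the struct layout is illustrative, not the exact PCDP wire format, and it assumes a little-endian length field as on ia64.)

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative record header: every entry starts with type + length. */
    struct rec {
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;   /* total size of this record, in bytes */
    };

    static void walk(const uint8_t *table, size_t table_len)
    {
        const uint8_t *p = table, *end = table + table_len;

        while (p + sizeof(struct rec) <= end) {
            const struct rec *r = (const struct rec *)p;

            if (r->length < sizeof(struct rec) || p + r->length > end)
                break;                  /* malformed: stop, don't spin */
            printf("record type %u, %u bytes\n", r->type, r->length);
            p += r->length;             /* the length field is the cursor */
        }
    }

    int main(void)
    {
        /* Two records: 8 bytes, then 6 bytes. */
        uint8_t table[] = { 1, 0, 8, 0, 0xaa, 0xbb, 0xcc, 0xdd,
                            2, 0, 6, 0, 0xee, 0xff };
        walk(table, sizeof(table));
        return 0;
    }

The bounds checks are the part worth copying: a zero or oversized length field in firmware data must terminate the walk rather than loop or run off the table.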
diff --git a/xen/arch/ia64/xen/pci.c b/xen/arch/ia64/xen/pci.c
deleted file mode 100644
index 43aa1e91ca..0000000000
--- a/xen/arch/ia64/xen/pci.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * pci.c - Low-Level PCI Access in IA-64
- *
- * Derived from bios32.c of i386 tree.
- *
- * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- * Copyright (C) 2004 Silicon Graphics, Inc.
- *
- * Note: Above list of copyright holders is incomplete...
- */
-
-#include <xen/pci.h>
-#include <xen/pci_regs.h>
-#include <xen/spinlock.h>
-
-#include <asm/io.h>
-#include <asm/sal.h>
-#include <asm/hw_irq.h>
-
-/*
- * Low-level SAL-based PCI configuration access functions. Note that SAL
- * calls are already serialized (via sal_lock), so we don't need another
- * synchronization mechanism here.
- */
-
-#define PCI_SAL_ADDRESS(seg, bus, devfn, reg) \
- (((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))
-
-/* SAL 3.2 adds support for extended config space. */
-
-#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg) \
- (((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
-
-static int
-pci_sal_read (unsigned int seg, unsigned int bus, unsigned int devfn,
- int reg, int len, u32 *value)
-{
- u64 addr, data = 0;
- int mode, result;
-
- if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
- return -EINVAL;
-
- if ((seg | reg) <= 255) {
- addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
- mode = 0;
- } else {
- addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
- mode = 1;
- }
- result = ia64_sal_pci_config_read(addr, mode, len, &data);
- if (result != 0)
- return -EINVAL;
-
- *value = (u32) data;
- return 0;
-}
-
-static int
-pci_sal_write (unsigned int seg, unsigned int bus, unsigned int devfn,
- int reg, int len, u32 value)
-{
- u64 addr;
- int mode, result;
-
- if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
- return -EINVAL;
-
- if ((seg | reg) <= 255) {
- addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
- mode = 0;
- } else {
- addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
- mode = 1;
- }
- result = ia64_sal_pci_config_write(addr, mode, len, value);
- if (result != 0)
- return -EINVAL;
- return 0;
-}
-
-
-uint8_t pci_conf_read8(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg)
-{
- uint32_t value;
- BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
- pci_sal_read(seg, bus, (dev<<3)|func, reg, 1, &value);
- return (uint8_t)value;
-}
-
-uint16_t pci_conf_read16(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg)
-{
- uint32_t value;
- BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
- pci_sal_read(seg, bus, (dev<<3)|func, reg, 2, &value);
- return (uint16_t)value;
-}
-
-uint32_t pci_conf_read32(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg)
-{
- uint32_t value;
- BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
- pci_sal_read(seg, bus, (dev<<3)|func, reg, 4, &value);
- return (uint32_t)value;
-}
-
-void pci_conf_write8(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg, uint8_t data)
-{
- BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
- pci_sal_write(seg, bus, (dev<<3)|func, reg, 1, data);
-}
-
-void pci_conf_write16(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg, uint16_t data)
-{
- BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
- pci_sal_write(seg, bus, (dev<<3)|func, reg, 2, data);
-}
-
-void pci_conf_write32(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg, uint32_t data)
-{
- BUG_ON((seg > 65535) || (bus > 255) || (dev > 31) || (func > 7) || (reg > 255));
- pci_sal_write(seg, bus, (dev<<3)|func, reg, 4, data);
-}
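(The two macros above pack segment/bus/devfn/register into the single u64 the SAL call takes: 8 bits per field in the legacy form, and a 12-bit register offset (4 KB extended config space) with everything shifted up 4 bits in the SAL 3.2 form. A quick standalone check of the packing, mirroring the macros:)

    #include <assert.h>
    #include <stdint.h>

    #define PCI_SAL_ADDRESS(seg, bus, devfn, reg) \
        (((uint64_t)(seg) << 24) | ((bus) << 16) | ((devfn) << 8) | (reg))

    #define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg) \
        (((uint64_t)(seg) << 28) | ((bus) << 20) | ((devfn) << 12) | (reg))

    int main(void)
    {
        /* devfn packs device (5 bits) and function (3 bits): dev 3, fn 1. */
        unsigned devfn = (3 << 3) | 1;             /* 0x19 */

        /* Legacy form: seg 1, bus 2, devfn 0x19, reg 0x40. */
        assert(PCI_SAL_ADDRESS(1, 2, devfn, 0x40) == 0x01021940ull);

        /* Extended form: same device, 12-bit register offset 0xCFC. */
        assert(PCI_SAL_EXT_ADDRESS(1, 2, devfn, 0xCFC) == 0x10219CFCull);
        return 0;
    }

This is also why pci_sal_read() picks the legacy form only when (seg | reg) <= 255: a segment above 255 or a register above 0xFF needs the SAL 3.2 encoding's wider fields.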
diff --git a/xen/arch/ia64/xen/platform_hypercall.c b/xen/arch/ia64/xen/platform_hypercall.c
deleted file mode 100644
index 537476aa8b..0000000000
--- a/xen/arch/ia64/xen/platform_hypercall.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/******************************************************************************
- * platform_hypercall.c
- *
- * Hardware platform operations. Intended for use by domain-0 kernel.
- *
- * Copyright (c) 2002-2006, K Fraser
- */
-
-#include <xen/config.h>
-#include <xen/types.h>
-#include <xen/lib.h>
-#include <xen/sched.h>
-#include <xen/domain.h>
-#include <xen/guest_access.h>
-#include <xen/acpi.h>
-#include <public/platform.h>
-#include <acpi/cpufreq/processor_perf.h>
-
-DEFINE_SPINLOCK(xenpf_lock);
-
-extern int set_px_pminfo(uint32_t cpu, struct xen_processor_performance *perf);
-extern long set_cx_pminfo(uint32_t cpu, struct xen_processor_power *power);
-
-long do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
-{
- long ret = 0;
- struct xen_platform_op curop, *op = &curop;
-
- if ( !IS_PRIV(current->domain) )
- return -EPERM;
-
- if ( copy_from_guest(op, u_xenpf_op, 1) )
- return -EFAULT;
-
- if ( op->interface_version != XENPF_INTERFACE_VERSION )
- return -EACCES;
-
- switch ( op->cmd )
- {
- case XENPF_set_processor_pminfo:
- spin_lock(&xenpf_lock);
- switch ( op->u.set_pminfo.type )
- {
- case XEN_PM_PX:
- if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) )
- {
- ret = -ENOSYS;
- break;
- }
- ret = set_px_pminfo(op->u.set_pminfo.id,
- &op->u.set_pminfo.u.perf);
- break;
-
- case XEN_PM_CX:
- /* Place holder for Cx */
- ret = -ENOSYS;
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
- spin_unlock(&xenpf_lock);
- break;
-
- default:
- printk("Unknown platform hypercall op 0x%x\n", op->cmd);
- ret = -ENOSYS;
- break;
- }
-
- return ret;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
-
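(do_platform_op() above is the canonical hypercall prologue, in order: privilege check before touching anything, copy the argument block in from guest memory, gate on the interface version, and only then dispatch on the command, taking xenpf_lock around the stateful case. A minimal sketch of that ordering; the op layout, version constant and helper names are invented for illustration.)

    #include <errno.h>
    #include <string.h>

    #define OP_INTERFACE_VERSION 0x01u   /* illustrative ABI version */

    struct op { unsigned version; unsigned cmd; };

    /* Stand-ins for the real guards. */
    static int caller_is_privileged(void) { return 1; }
    static int copy_in(struct op *dst, const void *guest, size_t n)
    {
        memcpy(dst, guest, n);   /* real code validates the guest pointer */
        return 0;
    }

    static long do_op(const void *guest_arg)
    {
        struct op op;

        if (!caller_is_privileged())
            return -EPERM;       /* 1: permission, before reading arguments */
        if (copy_in(&op, guest_arg, sizeof(op)))
            return -EFAULT;      /* 2: copy in; never trust guest memory */
        if (op.version != OP_INTERFACE_VERSION)
            return -EACCES;      /* 3: ABI version gate */

        switch (op.cmd) {        /* 4: dispatch */
        default:
            return -ENOSYS;
        }
    }

    int main(void)
    {
        struct op req = { OP_INTERFACE_VERSION, 42 };
        return do_op(&req) == -ENOSYS ? 0 : 1;  /* unknown cmd -> -ENOSYS */
    }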
diff --git a/xen/arch/ia64/xen/privop.c b/xen/arch/ia64/xen/privop.c
deleted file mode 100644
index a7a91071f4..0000000000
--- a/xen/arch/ia64/xen/privop.c
+++ /dev/null
@@ -1,889 +0,0 @@
-/*
- * Privileged operation "API" handling functions.
- *
- * Copyright (C) 2004 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- */
-
-#include <asm/privop.h>
-#include <asm/vcpu.h>
-#include <asm/processor.h>
-#include <asm/delay.h> // Debug only
-#include <asm/dom_fw.h>
-#include <asm/vhpt.h>
-#include <asm/bundle.h>
-#include <asm/debugger.h>
-#include <xen/perfc.h>
-
-static const long priv_verbose = 0;
-
-/* Set to 1 to handle privified instructions from the privify tool. */
-#ifndef CONFIG_PRIVIFY
-static const int privify_en = 0;
-#else
-static const int privify_en = 1;
-#endif
-
-/**************************************************************************
-Privileged operation emulation routines
-**************************************************************************/
-
-static IA64FAULT priv_rfi(VCPU * vcpu, INST64 inst)
-{
- REGS *regs = vcpu_regs(vcpu);
- if (PSCB(vcpu, ifs) > 0x8000000000000000UL
- && regs->cr_ifs > 0x8000000000000000UL) {
- panic_domain(regs,
- "rfi emulation with double uncover is "
- "impossible - use hyperprivop\n"
- " ip=0x%lx vifs=0x%lx ifs=0x%lx\n",
- regs->cr_iip, PSCB(vcpu, ifs), regs->cr_ifs);
- }
- return vcpu_rfi(vcpu);
-}
-
-static IA64FAULT priv_bsw0(VCPU * vcpu, INST64 inst)
-{
- return vcpu_bsw0(vcpu);
-}
-
-static IA64FAULT priv_bsw1(VCPU * vcpu, INST64 inst)
-{
- return vcpu_bsw1(vcpu);
-}
-
-static IA64FAULT priv_cover(VCPU * vcpu, INST64 inst)
-{
- return vcpu_cover(vcpu);
-}
-
-static IA64FAULT priv_ptc_l(VCPU * vcpu, INST64 inst)
-{
- u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
- u64 log_range;
-
- log_range = ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
- return vcpu_ptc_l(vcpu, vadr, log_range);
-}
-
-static IA64FAULT priv_ptc_e(VCPU * vcpu, INST64 inst)
-{
- unsigned int src = inst.M28.r3;
-
- // NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
- if (privify_en && src > 63)
- return vcpu_fc(vcpu, vcpu_get_gr(vcpu, src - 64));
- return vcpu_ptc_e(vcpu, vcpu_get_gr(vcpu, src));
-}
-
-static IA64FAULT priv_ptc_g(VCPU * vcpu, INST64 inst)
-{
- u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
- u64 addr_range;
-
- addr_range = 1 << ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
- return vcpu_ptc_g(vcpu, vadr, addr_range);
-}
-
-static IA64FAULT priv_ptc_ga(VCPU * vcpu, INST64 inst)
-{
- u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
- u64 addr_range;
-
- addr_range = 1 << ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
- return vcpu_ptc_ga(vcpu, vadr, addr_range);
-}
-
-static IA64FAULT priv_ptr_d(VCPU * vcpu, INST64 inst)
-{
- u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
- u64 log_range;
-
- log_range = (vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2;
- return vcpu_ptr_d(vcpu, vadr, log_range);
-}
-
-static IA64FAULT priv_ptr_i(VCPU * vcpu, INST64 inst)
-{
- u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
- u64 log_range;
-
- log_range = (vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2;
- return vcpu_ptr_i(vcpu, vadr, log_range);
-}
-
-static IA64FAULT priv_tpa(VCPU * vcpu, INST64 inst)
-{
- u64 padr;
- unsigned int fault;
- unsigned int src = inst.M46.r3;
-
- // NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
- if (privify_en && src > 63)
- fault = vcpu_ttag(vcpu, vcpu_get_gr(vcpu, src - 64), &padr);
- else
- fault = vcpu_tpa(vcpu, vcpu_get_gr(vcpu, src), &padr);
- if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
- else
- return fault;
-}
-
-static IA64FAULT priv_tak(VCPU * vcpu, INST64 inst)
-{
- u64 key;
- unsigned int fault;
- unsigned int src = inst.M46.r3;
-
- // NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
- if (privify_en && src > 63)
- fault = vcpu_thash(vcpu, vcpu_get_gr(vcpu, src - 64), &key);
- else
- fault = vcpu_tak(vcpu, vcpu_get_gr(vcpu, src), &key);
- if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M46.r1, key, 0);
- else
- return fault;
-}
-
-/************************************
- * Insert translation register/cache
-************************************/
-
-static IA64FAULT priv_itr_d(VCPU * vcpu, INST64 inst)
-{
- u64 fault, itir, ifa, pte, slot;
-
- //if (!vcpu_get_psr_ic(vcpu))
- // return IA64_ILLOP_FAULT;
- fault = vcpu_get_itir(vcpu, &itir);
- if (fault != IA64_NO_FAULT)
- return IA64_ILLOP_FAULT;
- fault = vcpu_get_ifa(vcpu, &ifa);
- if (fault != IA64_NO_FAULT)
- return IA64_ILLOP_FAULT;
- pte = vcpu_get_gr(vcpu, inst.M42.r2);
- slot = vcpu_get_gr(vcpu, inst.M42.r3);
-
- return vcpu_itr_d(vcpu, slot, pte, itir, ifa);
-}
-
-static IA64FAULT priv_itr_i(VCPU * vcpu, INST64 inst)
-{
- u64 fault, itir, ifa, pte, slot;
-
- //if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
- fault = vcpu_get_itir(vcpu, &itir);
- if (fault != IA64_NO_FAULT)
- return IA64_ILLOP_FAULT;
- fault = vcpu_get_ifa(vcpu, &ifa);
- if (fault != IA64_NO_FAULT)
- return IA64_ILLOP_FAULT;
- pte = vcpu_get_gr(vcpu, inst.M42.r2);
- slot = vcpu_get_gr(vcpu, inst.M42.r3);
-
- return vcpu_itr_i(vcpu, slot, pte, itir, ifa);
-}
-
-static IA64FAULT priv_itc_d(VCPU * vcpu, INST64 inst)
-{
- u64 fault, itir, ifa, pte;
-
- //if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
- fault = vcpu_get_itir(vcpu, &itir);
- if (fault != IA64_NO_FAULT)
- return IA64_ILLOP_FAULT;
- fault = vcpu_get_ifa(vcpu, &ifa);
- if (fault != IA64_NO_FAULT)
- return IA64_ILLOP_FAULT;
- pte = vcpu_get_gr(vcpu, inst.M41.r2);
-
- return vcpu_itc_d(vcpu, pte, itir, ifa);
-}
-
-static IA64FAULT priv_itc_i(VCPU * vcpu, INST64 inst)
-{
- u64 fault, itir, ifa, pte;
-
- //if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
- fault = vcpu_get_itir(vcpu, &itir);
- if (fault != IA64_NO_FAULT)
- return IA64_ILLOP_FAULT;
- fault = vcpu_get_ifa(vcpu, &ifa);
- if (fault != IA64_NO_FAULT)
- return IA64_ILLOP_FAULT;
- pte = vcpu_get_gr(vcpu, inst.M41.r2);
-
- return vcpu_itc_i(vcpu, pte, itir, ifa);
-}
-
-/*************************************
- * Moves to semi-privileged registers
-*************************************/
-
-static IA64FAULT priv_mov_to_ar_imm(VCPU * vcpu, INST64 inst)
-{
- // I27 and M30 are identical for these fields
- u64 ar3 = inst.M30.ar3;
- u64 imm = vcpu_get_gr(vcpu, inst.M30.imm);
- return vcpu_set_ar(vcpu, ar3, imm);
-}
-
-static IA64FAULT priv_mov_to_ar_reg(VCPU * vcpu, INST64 inst)
-{
- // I26 and M29 are identical for these fields
- u64 ar3 = inst.M29.ar3;
-
- if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) {
- // privified mov from kr
- u64 val;
- if (vcpu_get_ar(vcpu, ar3, &val) != IA64_ILLOP_FAULT)
- return vcpu_set_gr(vcpu, inst.M29.r2 - 64, val, 0);
- else
- return IA64_ILLOP_FAULT;
- } else {
- u64 r2 = vcpu_get_gr(vcpu, inst.M29.r2);
- return vcpu_set_ar(vcpu, ar3, r2);
- }
-}
-
-/********************************
- * Moves to privileged registers
-********************************/
-
-static IA64FAULT priv_mov_to_pkr(VCPU * vcpu, INST64 inst)
-{
- u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- return vcpu_set_pkr(vcpu, r3, r2);
-}
-
-static IA64FAULT priv_mov_to_rr(VCPU * vcpu, INST64 inst)
-{
- u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- return vcpu_set_rr(vcpu, r3, r2);
-}
-
-static IA64FAULT priv_mov_to_dbr(VCPU * vcpu, INST64 inst)
-{
- u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- return vcpu_set_dbr(vcpu, r3, r2);
-}
-
-static IA64FAULT priv_mov_to_ibr(VCPU * vcpu, INST64 inst)
-{
- u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- return vcpu_set_ibr(vcpu, r3, r2);
-}
-
-static IA64FAULT priv_mov_to_pmc(VCPU * vcpu, INST64 inst)
-{
- u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- return vcpu_set_pmc(vcpu, r3, r2);
-}
-
-static IA64FAULT priv_mov_to_pmd(VCPU * vcpu, INST64 inst)
-{
- u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- return vcpu_set_pmd(vcpu, r3, r2);
-}
-
-static IA64FAULT priv_mov_to_cr(VCPU * vcpu, INST64 inst)
-{
- u64 val = vcpu_get_gr(vcpu, inst.M32.r2);
- perfc_incra(mov_to_cr, inst.M32.cr3);
- switch (inst.M32.cr3) {
- case 0:
- return vcpu_set_dcr(vcpu, val);
- case 1:
- return vcpu_set_itm(vcpu, val);
- case 2:
- return vcpu_set_iva(vcpu, val);
- case 8:
- return vcpu_set_pta(vcpu, val);
- case 16:
- return vcpu_set_ipsr(vcpu, val);
- case 17:
- return vcpu_set_isr(vcpu, val);
- case 19:
- return vcpu_set_iip(vcpu, val);
- case 20:
- return vcpu_set_ifa(vcpu, val);
- case 21:
- return vcpu_set_itir(vcpu, val);
- case 22:
- return vcpu_set_iipa(vcpu, val);
- case 23:
- return vcpu_set_ifs(vcpu, val);
- case 24:
- return vcpu_set_iim(vcpu, val);
- case 25:
- return vcpu_set_iha(vcpu, val);
- case 64:
- return vcpu_set_lid(vcpu, val);
- case 65:
- return IA64_ILLOP_FAULT;
- case 66:
- return vcpu_set_tpr(vcpu, val);
- case 67:
- return vcpu_set_eoi(vcpu, val);
- case 68:
- return IA64_ILLOP_FAULT;
- case 69:
- return IA64_ILLOP_FAULT;
- case 70:
- return IA64_ILLOP_FAULT;
- case 71:
- return IA64_ILLOP_FAULT;
- case 72:
- return vcpu_set_itv(vcpu, val);
- case 73:
- return vcpu_set_pmv(vcpu, val);
- case 74:
- return vcpu_set_cmcv(vcpu, val);
- case 80:
- return vcpu_set_lrr0(vcpu, val);
- case 81:
- return vcpu_set_lrr1(vcpu, val);
- default:
- return IA64_ILLOP_FAULT;
- }
-}
-
-static IA64FAULT priv_rsm(VCPU * vcpu, INST64 inst)
-{
- u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
- return vcpu_reset_psr_sm(vcpu, imm24);
-}
-
-static IA64FAULT priv_ssm(VCPU * vcpu, INST64 inst)
-{
- u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
- return vcpu_set_psr_sm(vcpu, imm24);
-}
-
-/**
- * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
- */
-static IA64FAULT priv_mov_to_psr(VCPU * vcpu, INST64 inst)
-{
- u64 val = vcpu_get_gr(vcpu, inst.M35.r2);
- return vcpu_set_psr_l(vcpu, val);
-}
-
-/**********************************
- * Moves from privileged registers
- **********************************/
-
-static IA64FAULT priv_mov_from_rr(VCPU * vcpu, INST64 inst)
-{
- u64 val;
- IA64FAULT fault;
- u64 reg;
-
- reg = vcpu_get_gr(vcpu, inst.M43.r3);
- if (privify_en && inst.M43.r1 > 63) {
- // privified mov from cpuid
- fault = vcpu_get_cpuid(vcpu, reg, &val);
- if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val, 0);
- } else {
- fault = vcpu_get_rr(vcpu, reg, &val);
- if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
- }
- return fault;
-}
-
-static IA64FAULT priv_mov_from_pkr(VCPU * vcpu, INST64 inst)
-{
- u64 val;
- IA64FAULT fault;
-
- fault = vcpu_get_pkr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
- if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
- else
- return fault;
-}
-
-static IA64FAULT priv_mov_from_dbr(VCPU * vcpu, INST64 inst)
-{
- u64 val;
- IA64FAULT fault;
-
- fault = vcpu_get_dbr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
- if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
- else
- return fault;
-}
-
-static IA64FAULT priv_mov_from_ibr(VCPU * vcpu, INST64 inst)
-{
- u64 val;
- IA64FAULT fault;
-
- fault = vcpu_get_ibr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
- if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
- else
- return fault;
-}
-
-static IA64FAULT priv_mov_from_pmc(VCPU * vcpu, INST64 inst)
-{
- u64 val;
- IA64FAULT fault;
- u64 reg;
-
- reg = vcpu_get_gr(vcpu, inst.M43.r3);
- if (privify_en && inst.M43.r1 > 63) {
- // privified mov from pmd
- fault = vcpu_get_pmd(vcpu, reg, &val);
- if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val, 0);
- } else {
- fault = vcpu_get_pmc(vcpu, reg, &val);
- if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
- }
- return fault;
-}
-
-#define cr_get(cr) \
- ((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
- vcpu_set_gr(vcpu, tgt, val, 0) : fault;
-
-static IA64FAULT priv_mov_from_cr(VCPU * vcpu, INST64 inst)
-{
- u64 tgt = inst.M33.r1;
- u64 val;
- IA64FAULT fault;
-
- perfc_incra(mov_from_cr, inst.M33.cr3);
- switch (inst.M33.cr3) {
- case 0:
- return cr_get(dcr);
- case 1:
- return cr_get(itm);
- case 2:
- return cr_get(iva);
- case 8:
- return cr_get(pta);
- case 16:
- return cr_get(ipsr);
- case 17:
- return cr_get(isr);
- case 19:
- return cr_get(iip);
- case 20:
- return cr_get(ifa);
- case 21:
- return cr_get(itir);
- case 22:
- return cr_get(iipa);
- case 23:
- return cr_get(ifs);
- case 24:
- return cr_get(iim);
- case 25:
- return cr_get(iha);
- case 64:
- return cr_get(lid);
- case 65:
- return cr_get(ivr);
- case 66:
- return cr_get(tpr);
- case 67:
- return vcpu_set_gr(vcpu, tgt, 0L, 0);
- case 68:
- return cr_get(irr0);
- case 69:
- return cr_get(irr1);
- case 70:
- return cr_get(irr2);
- case 71:
- return cr_get(irr3);
- case 72:
- return cr_get(itv);
- case 73:
- return cr_get(pmv);
- case 74:
- return cr_get(cmcv);
- case 80:
- return cr_get(lrr0);
- case 81:
- return cr_get(lrr1);
- default:
- return IA64_ILLOP_FAULT;
- }
- return IA64_ILLOP_FAULT;
-}
-
-static IA64FAULT priv_mov_from_psr(VCPU * vcpu, INST64 inst)
-{
- u64 tgt = inst.M33.r1;
- u64 val;
- IA64FAULT fault;
-
- fault = vcpu_get_psr_masked(vcpu, &val);
- if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, tgt, val, 0);
- else
- return fault;
-}
-
-/**************************************************************************
-Privileged operation decode and dispatch routines
-**************************************************************************/
-
-static const IA64_SLOT_TYPE slot_types[0x20][3] = {
- {M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
- {M, I, ILLEGAL}, {M, I, ILLEGAL},
- {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
- {M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
- {M, F, I}, {M, F, I},
- {M, M, F}, {M, M, F},
- {M, I, B}, {M, I, B},
- {M, B, B}, {M, B, B},
- {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
- {B, B, B}, {B, B, B},
- {M, M, B}, {M, M, B},
- {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
- {M, F, B}, {M, F, B},
- {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
-};
-
-// pointer to privileged emulation function
-typedef IA64FAULT(*PPEFCN) (VCPU * vcpu, INST64 inst);
-
-static const PPEFCN Mpriv_funcs[64] = {
- priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
- priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
- 0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
- priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
- priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr,
- priv_mov_from_pkr,
- priv_mov_from_pmc, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, priv_tpa, priv_tak,
- 0, 0, 0, 0,
- priv_mov_from_cr, priv_mov_from_psr, 0, 0,
- 0, 0, 0, 0,
- priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
- 0, 0, 0, 0,
- priv_ptc_e, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-static IA64FAULT priv_handle_op(VCPU * vcpu, REGS * regs)
-{
- IA64_BUNDLE bundle;
- int slot;
- IA64_SLOT_TYPE slot_type;
- INST64 inst;
- PPEFCN pfunc;
- unsigned long ipsr = regs->cr_ipsr;
- u64 iip = regs->cr_iip;
- int x6;
-
- // make a local copy of the bundle containing the privop
- if (!vcpu_get_domain_bundle(vcpu, regs, iip, &bundle)) {
- //return vcpu_force_data_miss(vcpu, regs->cr_iip);
- return vcpu_force_inst_miss(vcpu, regs->cr_iip);
- }
- slot = ((struct ia64_psr *)&ipsr)->ri;
- if (slot == 0)
- inst.inst = (bundle.i64[0] >> 5) & MASK_41;
- else if (slot == 1)
- inst.inst =
- ((bundle.i64[0] >> 46) | bundle.i64[1] << 18) & MASK_41;
- else if (slot == 2)
- inst.inst = (bundle.i64[1] >> 23) & MASK_41;
- else
- panic_domain(regs,
- "priv_handle_op: illegal slot: %d\n", slot);
-
- slot_type = slot_types[bundle.template][slot];
- if (priv_verbose) {
- printk("priv_handle_op: checking bundle at 0x%lx "
- "(op=0x%016lx) slot %d (type=%d)\n",
- iip, (u64) inst.inst, slot, slot_type);
- }
- if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
- // break instr for privified cover
- } else if (ia64_get_cpl(ipsr) > CONFIG_CPL0_EMUL)
- return IA64_ILLOP_FAULT;
-
- debugger_event(XEN_IA64_DEBUG_ON_PRIVOP);
-
- switch (slot_type) {
- case M:
- if (inst.generic.major == 0) {
-#if 0
- if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
- privcnt.cover++;
- return priv_cover(vcpu, inst);
- }
-#endif
- if (inst.M29.x3 != 0)
- break;
- if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
- perfc_incr(mov_to_ar_imm);
- return priv_mov_to_ar_imm(vcpu, inst);
- }
- if (inst.M44.x4 == 6) {
- perfc_incr(ssm);
- return priv_ssm(vcpu, inst);
- }
- if (inst.M44.x4 == 7) {
- perfc_incr(rsm);
- return priv_rsm(vcpu, inst);
- }
- break;
- } else if (inst.generic.major != 1)
- break;
- x6 = inst.M29.x6;
- if (x6 == 0x2a) {
- if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
- perfc_incr(mov_from_ar); // privified mov from kr
- else
- perfc_incr(mov_to_ar_reg);
- return priv_mov_to_ar_reg(vcpu, inst);
- }
- if (inst.M29.x3 != 0)
- break;
- if (!(pfunc = Mpriv_funcs[x6]))
- break;
- if (x6 == 0x1e || x6 == 0x1f) { // tpa or tak are "special"
- if (privify_en && inst.M46.r3 > 63) {
- if (x6 == 0x1e)
- x6 = 0x1b;
- else
- x6 = 0x1a;
- }
- }
- if (privify_en && x6 == 52 && inst.M28.r3 > 63)
- perfc_incr(fc);
- else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
- perfc_incr(cpuid);
- else
- perfc_incra(misc_privop, x6);
- return (*pfunc) (vcpu, inst);
- break;
- case B:
- if (inst.generic.major != 0)
- break;
- if (inst.B8.x6 == 0x08) {
- IA64FAULT fault;
- perfc_incr(rfi);
- fault = priv_rfi(vcpu, inst);
- if (fault == IA64_NO_FAULT)
- fault = IA64_RFI_IN_PROGRESS;
- return fault;
- }
- if (inst.B8.x6 == 0x0c) {
- perfc_incr(bsw0);
- return priv_bsw0(vcpu, inst);
- }
- if (inst.B8.x6 == 0x0d) {
- perfc_incr(bsw1);
- return priv_bsw1(vcpu, inst);
- }
- if (privify_en && inst.B8.x6 == 0x0) {
- // break instr for privified cover
- perfc_incr(cover);
- return priv_cover(vcpu, inst);
- }
- break;
- case I:
- if (inst.generic.major != 0)
- break;
-#if 0
- if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
- perfc_incr(cover);
- return priv_cover(vcpu, inst);
- }
-#endif
- if (inst.I26.x3 != 0)
- break; // I26.x3 == I27.x3
- if (inst.I26.x6 == 0x2a) {
- if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
- perfc_incr(mov_from_ar); // privified mov from kr
- else
- perfc_incr(mov_to_ar_reg);
- return priv_mov_to_ar_reg(vcpu, inst);
- }
- if (inst.I27.x6 == 0x0a) {
- perfc_incr(mov_to_ar_imm);
- return priv_mov_to_ar_imm(vcpu, inst);
- }
- break;
- default:
- break;
- }
- //printk("We who are about do die salute you\n");
- printk("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) "
- "slot %d (type=%d), ipsr=0x%lx\n",
- iip, (u64) inst.inst, slot, slot_type, ipsr);
- //printk("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
- //thread_mozambique("privop fault\n");
- return IA64_ILLOP_FAULT;
-}
-
-/** Emulate a privileged operation.
- *
- * This should probably return 0 on success and the "trap number"
- * (e.g. illegal operation for bad register, priv op for an
- * instruction that isn't allowed, etc.) on "failure"
- *
- * @param vcpu virtual cpu
- * @param isrcode interrupt service routine code
- * @return fault
- */
-IA64FAULT priv_emulate(VCPU * vcpu, REGS * regs, u64 isr)
-{
- IA64FAULT fault;
- u64 isrcode = (isr >> 4) & 0xf;
-
- // Handle privops masked as illops (isrcode 0-2) and breaks (isrcode 6).
- if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
- printk("priv_emulate: isrcode != 0 or 1 or 2\n");
- printk("priv_emulate: returning ILLOP, not implemented!\n");
- while (1) ;
- return IA64_ILLOP_FAULT;
- }
- // it's OK for a privified cover to be executed in user-land
- fault = priv_handle_op(vcpu, regs);
- if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) {
- // success!!
- // update iip/ipsr to point to the next instruction
- (void)vcpu_increment_iip(vcpu);
- }
- if (fault == IA64_ILLOP_FAULT)
- printk("priv_emulate: priv_handle_op fails, "
- "isr=0x%lx iip=%lx\n", isr, regs->cr_iip);
- return fault;
-}
-
-/* hyperprivops are generally executed in assembly (with physical psr.ic off)
- * so this code is primarily used for debugging them */
-int ia64_hyperprivop(unsigned long iim, REGS * regs)
-{
- struct vcpu *v = current;
- u64 val;
- u64 itir, ifa;
-
- if (!iim || iim > HYPERPRIVOP_MAX) {
- panic_domain(regs, "bad hyperprivop: iim=%lx, iip=0x%lx\n",
- iim, regs->cr_iip);
- return 1;
- }
- perfc_incra(slow_hyperprivop, iim);
-
- debugger_event(XEN_IA64_DEBUG_ON_PRIVOP);
-
- switch (iim) {
- case HYPERPRIVOP_RFI:
- vcpu_rfi(v);
- return 0; // don't update iip
- case HYPERPRIVOP_RSM_DT:
- vcpu_reset_psr_dt(v);
- return 1;
- case HYPERPRIVOP_SSM_DT:
- vcpu_set_psr_dt(v);
- return 1;
- case HYPERPRIVOP_COVER:
- vcpu_cover(v);
- return 1;
- case HYPERPRIVOP_ITC_D:
- vcpu_get_itir(v, &itir);
- vcpu_get_ifa(v, &ifa);
- vcpu_itc_d(v, regs->r8, itir, ifa);
- return 1;
- case HYPERPRIVOP_ITC_I:
- vcpu_get_itir(v, &itir);
- vcpu_get_ifa(v, &ifa);
- vcpu_itc_i(v, regs->r8, itir, ifa);
- return 1;
- case HYPERPRIVOP_SSM_I:
- vcpu_set_psr_i(v);
- return 1;
- case HYPERPRIVOP_GET_IVR:
- vcpu_get_ivr(v, &val);
- regs->r8 = val;
- return 1;
- case HYPERPRIVOP_GET_TPR:
- vcpu_get_tpr(v, &val);
- regs->r8 = val;
- return 1;
- case HYPERPRIVOP_SET_TPR:
- vcpu_set_tpr(v, regs->r8);
- return 1;
- case HYPERPRIVOP_EOI:
- vcpu_set_eoi(v, 0L);
- return 1;
- case HYPERPRIVOP_SET_ITM:
- vcpu_set_itm(v, regs->r8);
- return 1;
- case HYPERPRIVOP_THASH:
- vcpu_thash(v, regs->r8, &val);
- regs->r8 = val;
- return 1;
- case HYPERPRIVOP_PTC_GA:
- vcpu_ptc_ga(v, regs->r8, (1L << ((regs->r9 & 0xfc) >> 2)));
- return 1;
- case HYPERPRIVOP_ITR_D:
- vcpu_get_itir(v, &itir);
- vcpu_get_ifa(v, &ifa);
- vcpu_itr_d(v, regs->r8, regs->r9, itir, ifa);
- return 1;
- case HYPERPRIVOP_GET_RR:
- vcpu_get_rr(v, regs->r8, &val);
- regs->r8 = val;
- return 1;
- case HYPERPRIVOP_SET_RR:
- vcpu_set_rr(v, regs->r8, regs->r9);
- return 1;
- case HYPERPRIVOP_SET_KR:
- vcpu_set_ar(v, regs->r8, regs->r9);
- return 1;
- case HYPERPRIVOP_FC:
- vcpu_fc(v, regs->r8);
- return 1;
- case HYPERPRIVOP_GET_CPUID:
- vcpu_get_cpuid(v, regs->r8, &val);
- regs->r8 = val;
- return 1;
- case HYPERPRIVOP_GET_PMD:
- vcpu_get_pmd(v, regs->r8, &val);
- regs->r8 = val;
- return 1;
- case HYPERPRIVOP_GET_EFLAG:
- vcpu_get_ar(v, 24, &val);
- regs->r8 = val;
- return 1;
- case HYPERPRIVOP_SET_EFLAG:
- vcpu_set_ar(v, 24, regs->r8);
- return 1;
- case HYPERPRIVOP_RSM_BE:
- vcpu_reset_psr_sm(v, IA64_PSR_BE);
- return 1;
- case HYPERPRIVOP_GET_PSR:
- vcpu_get_psr_masked(v, &val);
- regs->r8 = val;
- return 1;
- case HYPERPRIVOP_SET_RR0_TO_RR4:
- vcpu_set_rr0_to_rr4(v, regs->r8, regs->r9, regs->r10,
- regs->r11, regs->r14);
- return 1;
- }
- return 0;
-}
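(The slot extraction in priv_handle_op() is raw IA-64 bundle decoding: a bundle is 128 bits holding a 5-bit template and three 41-bit instruction slots at bit offsets 5, 46 and 87, so slot 1 straddles the boundary between the two 64-bit words. A self-contained sketch of the same bit surgery:)

    #include <stdint.h>
    #include <stdio.h>

    #define MASK_41 ((1ULL << 41) - 1)

    /* An IA-64 bundle: template in bits 0-4, slots at bits 5, 46, 87. */
    struct bundle { uint64_t lo, hi; };

    static uint64_t slot(const struct bundle *b, int n)
    {
        switch (n) {
        case 0:
            return (b->lo >> 5) & MASK_41;
        case 1:
            /* Straddles the word boundary: 18 bits from lo, 23 from hi. */
            return ((b->lo >> 46) | (b->hi << 18)) & MASK_41;
        case 2:
            return (b->hi >> 23) & MASK_41;
        }
        return 0;
    }

    int main(void)
    {
        struct bundle b = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };
        printf("template=%#llx\n", (unsigned long long)(b.lo & 0x1f));
        for (int n = 0; n < 3; n++)
            printf("slot%d=%#011llx\n", n, (unsigned long long)slot(&b, n));
        return 0;
    }

Which of the three slots to decode comes from psr.ri in the interrupted context, and the template value indexes the slot_types[] table above to learn whether that slot holds an M, I, B or F instruction.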
diff --git a/xen/arch/ia64/xen/privop_stat.c b/xen/arch/ia64/xen/privop_stat.c
deleted file mode 100644
index e8e6fa9921..0000000000
--- a/xen/arch/ia64/xen/privop_stat.c
+++ /dev/null
@@ -1,153 +0,0 @@
-#include <xen/lib.h>
-#include <public/xen.h>
-#include <xen/perfc.h>
-#include <asm/atomic.h>
-#include <asm/privop_stat.h>
-
-#ifdef CONFIG_PRIVOP_ADDRS
-
-struct privop_addr_count {
- unsigned long addr[PRIVOP_COUNT_NADDRS];
- unsigned int count[PRIVOP_COUNT_NADDRS];
- unsigned int overflow;
-};
-
-struct privop_addr_info {
- enum perfcounter perfc_addr;
- enum perfcounter perfc_count;
- enum perfcounter perfc_overflow;
-};
-
-#define PERFCOUNTER(var, name)
-#define PERFCOUNTER_ARRAY(var, name, size)
-
-#define PERFSTATUS(var, name)
-#define PERFSTATUS_ARRAY(var, name, size)
-
-#define PERFPRIVOPADDR(name) \
- { \
- PERFC_privop_addr_##name##_addr, \
- PERFC_privop_addr_##name##_count, \
- PERFC_privop_addr_##name##_overflow \
- },
-
-static const struct privop_addr_info privop_addr_info[] = {
-#include <asm/perfc_defn.h>
-};
-
-#define PRIVOP_COUNT_NINSTS \
- (sizeof(privop_addr_info) / sizeof(privop_addr_info[0]))
-
-static DEFINE_PER_CPU(struct privop_addr_count[PRIVOP_COUNT_NINSTS], privop_addr_counter);
-
-void privop_count_addr(unsigned long iip, enum privop_inst inst)
-{
- struct privop_addr_count *v = this_cpu(privop_addr_counter) + inst;
- int i;
-
- if (inst >= PRIVOP_COUNT_NINSTS)
- return;
- for (i = 0; i < PRIVOP_COUNT_NADDRS; i++) {
- if (!v->addr[i]) {
- v->addr[i] = iip;
- v->count[i]++;
- return;
- }
- else if (v->addr[i] == iip) {
- v->count[i]++;
- return;
- }
- }
- v->overflow++;
-}
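
privop_count_addr() is a fixed-capacity, first-fit address histogram: a new iip claims the first empty slot, a known iip bumps its slot's counter, and once all PRIVOP_COUNT_NADDRS slots hold distinct addresses, any further new address only increments the overflow counter. A self-contained sketch of the same logic, with the capacity shrunk for illustration:

    #include <stdio.h>

    #define NADDRS 4  /* stand-in for PRIVOP_COUNT_NADDRS */

    struct addr_count {
        unsigned long addr[NADDRS];
        unsigned int count[NADDRS];
        unsigned int overflow;
    };

    static void count_addr(struct addr_count *v, unsigned long ip)
    {
        int i;
        for (i = 0; i < NADDRS; i++) {
            if (!v->addr[i]) {        /* free slot: claim it */
                v->addr[i] = ip;
                v->count[i]++;
                return;
            }
            if (v->addr[i] == ip) {   /* already tracked: bump */
                v->count[i]++;
                return;
            }
        }
        v->overflow++;                /* table full, distinct address */
    }

    int main(void)
    {
        struct addr_count c = { { 0 } };
        unsigned long ips[] = { 0x10, 0x20, 0x10, 0x30, 0x40, 0x50 };
        for (unsigned i = 0; i < sizeof(ips) / sizeof(ips[0]); i++)
            count_addr(&c, ips[i]);
        printf("overflow=%u count[0]=%u\n", c.overflow, c.count[0]); /* 1, 2 */
        return 0;
    }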
-
-void gather_privop_addrs(void)
-{
- unsigned int cpu;
-
- for_each_possible_cpu ( cpu ) {
- perfc_t *perfcounters = per_cpu(perfcounters, cpu);
- struct privop_addr_count *s = per_cpu(privop_addr_counter, cpu);
- int i, j;
-
- for (i = 0; i < PRIVOP_COUNT_NINSTS; i++, s++) {
- perfc_t *d;
-
- /* Note: addresses are truncated! */
- d = perfcounters + privop_addr_info[i].perfc_addr;
- for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
- d[j] = s->addr[j];
-
- d = perfcounters + privop_addr_info[i].perfc_count;
- for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
- d[j] = s->count[j];
-
- perfcounters[privop_addr_info[i].perfc_overflow] =
- s->overflow;
- }
- }
-}
-
-void reset_privop_addrs(void)
-{
- unsigned int cpu;
-
- for_each_possible_cpu ( cpu ) {
- struct privop_addr_count *v = per_cpu(privop_addr_counter, cpu);
- int i, j;
-
- for (i = 0; i < PRIVOP_COUNT_NINSTS; i++, v++) {
- for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
- v->addr[j] = v->count[j] = 0;
- v->overflow = 0;
- }
- }
-}
-#endif
-
-/**************************************************************************
-Privileged operation instrumentation routines
-**************************************************************************/
-
-#if 0
-static const char * const Mpriv_str[64] = {
- "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
- "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
- "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
- "ptr_d", "ptr_i", "itr_d", "itr_i",
- "mov_from_rr", "mov_from_dbr", "mov_from_ibr", "mov_from_pkr",
- "mov_from_pmc", "<0x15>", "<0x16>", "<0x17>",
- "<0x18>", "<0x19>", "privified-thash", "privified-ttag",
- "<0x1c>", "<0x1d>", "tpa", "tak",
- "<0x20>", "<0x21>", "<0x22>", "<0x23>",
- "mov_from_cr", "mov_from_psr", "<0x26>", "<0x27>",
- "<0x28>", "<0x29>", "<0x2a>", "<0x2b>",
- "mov_to_cr", "mov_to_psr", "itc_d", "itc_i",
- "<0x30>", "<0x31>", "<0x32>", "<0x33>",
- "ptc_e", "<0x35>", "<0x36>", "<0x37>",
- "<0x38>", "<0x39>", "<0x3a>", "<0x3b>",
- "<0x3c>", "<0x3d>", "<0x3e>", "<0x3f>"
-};
-
-#define RS "Rsvd"
-static const char * const cr_str[128] = {
- "dcr","itm","iva",RS,RS,RS,RS,RS,
- "pta",RS,RS,RS,RS,RS,RS,RS,
- "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
- "iim","iha",RS,RS,RS,RS,RS,RS,
- RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
- RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
- "lid","ivr","tpr","eoi","irr0","irr1","irr2","irr3",
- "itv","pmv","cmcv",RS,RS,RS,RS,RS,
- "lrr0","lrr1",RS,RS,RS,RS,RS,RS,
- RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
- RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
- RS,RS,RS,RS,RS,RS,RS,RS
-};
-
-static const char * const hyperpriv_str[HYPERPRIVOP_MAX+1] = {
- 0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
- "=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
- "=rr", "rr=", "kr=", "fc", "=cpuid", "=pmd", "=ar.eflg", "ar.eflg="
-};
-#endif
diff --git a/xen/arch/ia64/xen/regionreg.c b/xen/arch/ia64/xen/regionreg.c
deleted file mode 100644
index b2c7c50f6a..0000000000
--- a/xen/arch/ia64/xen/regionreg.c
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- * Region register and region id management
- *
- * Copyright (C) 2001-2004 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com
- * Bret Mckee (bret.mckee@hp.com)
- *
- */
-
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/percpu.h>
-#include <asm/page.h>
-#include <asm/regionreg.h>
-#include <asm/vhpt.h>
-#include <asm/vcpu.h>
-#include <asm/percpu.h>
-#include <asm/pal.h>
-#include <asm/vmx_vcpu.h>
-
-/* Defined in xenasm.S */
-extern void ia64_new_rr7(unsigned long rid, void *shared_info,
- void *shared_arch_info, unsigned long shared_info_va,
- unsigned long va_vhpt);
-extern void ia64_new_rr7_efi(unsigned long rid, unsigned long repin_percpu,
- unsigned long vpd);
-
-/* The RID virtualization mechanism is really simple: domains have fewer rid
-   bits than the host, and the host rid space is shared among the domains.
-   (Values in parentheses are the usual default values.)
-
-   The host rid space is partitioned into MAX_RID_BLOCKS (= 64)
-   blocks of 2**IA64_MIN_IMPL_RID_BITS (= 18) rids. The first block is also
-   partitioned into MAX_RID_BLOCKS small blocks. Small blocks are used for
-   metaphysical rids. Small block 0 can't be allocated and is reserved for
-   Xen's own rids during boot.
-
-   Blocks and small blocks are allocated together and a domain may
-   have one or more consecutive blocks (and small blocks).
-*/
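
To make the partitioning concrete: with 24 implemented rid bits and an 18-bit per-domain minimum, there are 2^(24-18) = 64 blocks, and the small blocks carved out of block 0 each hold 2^(18-6) = 2^12 metaphysical rids, matching the mp_rid_shift computed in init_rid_allocator() below. A sketch of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        const int min_rid_bits = 18;     /* IA64_MIN_IMPL_RID_BITS */
        const int max_rid_bits = 24;     /* IA64_MAX_IMPL_RID_BITS */

        int log_blocks   = max_rid_bits - min_rid_bits;  /* 6 */
        int max_blocks   = 1 << log_blocks;              /* 64 blocks */
        int mp_rid_shift = min_rid_bits - log_blocks;    /* 12: small-block size */

        printf("%d blocks of 2^%d rids; small blocks of 2^%d metaphysical rids\n",
               max_blocks, min_rid_bits, mp_rid_shift);
        return 0;
    }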
-
-/* Minimum number of RID bits for a domain. The current value is 18, which is
-   the minimum defined by the Itanium architecture, but it can be lowered
-   to increase the number of domains. */
-#define IA64_MIN_IMPL_RID_BITS (IA64_MIN_IMPL_RID_MSB+1)
-/* Maximum number of RID bits. This is definitely 24. */
-#define IA64_MAX_IMPL_RID_BITS 24
-
-/* Maximum number of blocks. */
-#define MAX_RID_BLOCKS (1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
-
-/* Default number of rid bits for domains. */
-static unsigned int domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
-integer_param("dom_rid_bits", domain_rid_bits_default);
-
-DEFINE_PER_CPU(unsigned long, domain_shared_info);
-DEFINE_PER_CPU(unsigned long, inserted_vhpt);
-DEFINE_PER_CPU(unsigned long, inserted_shared_info);
-DEFINE_PER_CPU(unsigned long, inserted_mapped_regs);
-DEFINE_PER_CPU(unsigned long, inserted_vpd);
-
-#if 0
-// following already defined in include/asm-ia64/gcc_intrin.h
-// it should probably be ifdef'd out from there to ensure all region
-// register usage is encapsulated in this file
-static inline unsigned long
-ia64_get_rr (unsigned long rr)
-{
- unsigned long r;
- __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
- return r;
-}
-
-static inline void
-ia64_set_rr (unsigned long rr, unsigned long rrv)
-{
- __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
-}
-#endif
-
-static unsigned long allocate_metaphysical_rr(struct domain *d, int n)
-{
- ia64_rr rrv;
- BUG_ON(d->arch.starting_mp_rid + n >= d->arch.ending_mp_rid);
-
- rrv.rrval = 0; // Or else may see reserved bit fault
- rrv.rid = d->arch.starting_mp_rid + n;
- rrv.ps = PAGE_SHIFT; // only used at domain creation
- rrv.ve = 0;
- /* Mangle metaphysical rid */
- rrv.rrval = vmMangleRID(rrv.rrval);
- return rrv.rrval;
-}
-
-/*************************************
- Region Block setup/management
-*************************************/
-
-static int implemented_rid_bits = 0;
-static int mp_rid_shift;
-static DEFINE_SPINLOCK(ridblock_lock);
-static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };
-
-void __init init_rid_allocator (void)
-{
- int log_blocks;
- pal_vm_info_2_u_t vm_info_2;
-
- /* Get machine rid_size. */
- BUG_ON (ia64_pal_vm_summary (NULL, &vm_info_2) != 0);
- implemented_rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;
-
- /* We need at least some space beyond the minimum. */
- BUG_ON (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS);
-
- /* But we cannot use more than IA64_MAX_IMPL_RID_BITS. */
- if (implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
- implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;
-
- /* Due to RID mangling, we expect 24 RID bits!
- This test should be removed if RID mangling is removed/modified. */
- if (implemented_rid_bits != 24) {
- printk ("RID mangling expected 24 RID bits, got only %d!\n",
- implemented_rid_bits);
- BUG();
- }
-
- /* Allow the creation of at least domain 0. */
- if (domain_rid_bits_default > implemented_rid_bits - 1)
- domain_rid_bits_default = implemented_rid_bits - 1;
-
- /* Check for too small values. */
- if (domain_rid_bits_default < IA64_MIN_IMPL_RID_BITS) {
- printk ("Default domain rid bits %d is too small, use %d\n",
- domain_rid_bits_default, IA64_MIN_IMPL_RID_BITS);
- domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
- }
-
- log_blocks = (implemented_rid_bits - IA64_MIN_IMPL_RID_BITS);
-
- printk ("Maximum number of domains: %d; %d RID bits per domain\n",
- (1 << (implemented_rid_bits - domain_rid_bits_default)) - 1,
- domain_rid_bits_default);
-
- mp_rid_shift = IA64_MIN_IMPL_RID_BITS - log_blocks;
- BUG_ON (mp_rid_shift < 3);
-}
-
-
-/*
- * Allocate a power-of-two-sized chunk of region id space -- one or more
- * "rid blocks"
- */
-int allocate_rid_range(struct domain *d, unsigned long ridbits)
-{
- int i, j, n_rid_blocks;
-
- if (ridbits == 0)
- ridbits = domain_rid_bits_default;
-
- if (ridbits >= IA64_MAX_IMPL_RID_BITS)
- ridbits = IA64_MAX_IMPL_RID_BITS - 1;
-
- if (ridbits < IA64_MIN_IMPL_RID_BITS)
- ridbits = IA64_MIN_IMPL_RID_BITS;
-
- // convert to rid_blocks and find one
- n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);
-
- // skip over block 0, reserved for "meta-physical mappings (and Xen)"
- spin_lock(&ridblock_lock);
- for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
- if (ridblock_owner[i] == NULL) {
- for (j = i; j < i + n_rid_blocks; ++j) {
- if (ridblock_owner[j]) {
- ++j;
- break;
- }
- }
- --j;
- if (ridblock_owner[j] == NULL)
- break;
- }
- }
-
- if (i >= MAX_RID_BLOCKS) {
- spin_unlock(&ridblock_lock);
- return 0;
- }
-
- // found an unused block:
- // (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
- // mark this block as owned
- for (j = i; j < i + n_rid_blocks; ++j)
- ridblock_owner[j] = d;
- spin_unlock(&ridblock_lock);
-
- // setup domain struct
- d->arch.rid_bits = ridbits;
- d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
- d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
-
- d->arch.starting_mp_rid = i << mp_rid_shift;
- d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;
-
- d->arch.metaphysical_rid_dt = allocate_metaphysical_rr(d, 0);
- d->arch.metaphysical_rid_d = allocate_metaphysical_rr(d, 1);
-
- dprintk(XENLOG_DEBUG, "### domain %p: rid=%x-%x mp_rid=%x\n",
- d, d->arch.starting_rid, d->arch.ending_rid,
- d->arch.starting_mp_rid);
-
- return 1;
-}
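
The loop above is a first-fit scan over power-of-two-aligned groups of rid blocks, skipping block 0; the inner j-loop and the --j correction are just asking "is the whole group free?". A simplified standalone version of the search (no locking, hypothetical names):

    #include <stdio.h>

    #define MAX_BLOCKS 64

    /* Returns the first index i (a multiple of n, i >= n) such that
     * owner[i..i+n-1] are all free, or -1 if none. Block 0 is skipped,
     * mirroring its reservation for metaphysical rids. */
    static int find_free_blocks(const void *owner[MAX_BLOCKS], int n)
    {
        for (int i = n; i < MAX_BLOCKS; i += n) {
            int j;
            for (j = i; j < i + n; j++)
                if (owner[j])
                    break;
            if (j == i + n)
                return i;     /* whole group free */
        }
        return -1;
    }

    int main(void)
    {
        const void *owner[MAX_BLOCKS] = { 0 };
        owner[2] = "dom1";    /* occupy one block inside the 2..3 group */
        printf("group of 2 -> %d\n", find_free_blocks(owner, 2));  /* 4 */
        return 0;
    }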
-
-
-int deallocate_rid_range(struct domain *d)
-{
- int i;
- int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
- int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;
-
- /* Sanity check. */
- if (d->arch.rid_bits == 0)
- return 1;
-
- spin_lock(&ridblock_lock);
- for (i = rid_block_start; i < rid_block_end; ++i) {
- ASSERT(ridblock_owner[i] == d);
- ridblock_owner[i] = NULL;
- }
- spin_unlock(&ridblock_lock);
-
- d->arch.rid_bits = 0;
- d->arch.starting_rid = 0;
- d->arch.ending_rid = 0;
- d->arch.starting_mp_rid = 0;
- d->arch.ending_mp_rid = 0;
- return 1;
-}
-
-static void
-set_rr(unsigned long rr, unsigned long rrval)
-{
- ia64_set_rr(rr, vmMangleRID(rrval));
- ia64_srlz_d();
-}
-
-static inline void
-ia64_new_rr7_vcpu(struct vcpu *v, unsigned long rid)
-{
- ia64_new_rr7(rid, v->domain->shared_info,
- v->arch.privregs, v->domain->arch.shared_info_va,
- __va_ul(vcpu_vhpt_maddr(v)));
-}
-
-// validates and changes a single region register
-// in the currently executing domain
-// Passing a value of -1 is a (successful) no-op
-// NOTE: DOES NOT SET VCPU's rrs[x] value!!
-int set_one_rr(unsigned long rr, unsigned long val)
-{
- struct vcpu *v = current;
- unsigned long rreg = REGION_NUMBER(rr);
- ia64_rr rrv, newrrv, memrrv;
- unsigned long newrid;
-
- rrv.rrval = val;
- newrrv.rrval = 0;
- newrid = v->arch.starting_rid + rrv.rid;
-
- // avoid reserved register/field fault
- if (unlikely(is_reserved_rr_field(v, val))) {
- printk("can't set rr%d to %lx, starting_rid=%x,"
- "ending_rid=%x, val=%lx\n", (int) rreg, newrid,
- v->arch.starting_rid,v->arch.ending_rid,val);
- return 0;
- }
-
- memrrv.rrval = rrv.rrval;
- newrrv.rid = newrid;
- newrrv.ve = 1; // VHPT now enabled for region 7!!
- newrrv.ps = v->arch.vhpt_pg_shift;
-
- if (rreg == 0) {
- v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
- if (!PSCB(v,metaphysical_mode))
- set_rr(rr,newrrv.rrval);
- } else if (rreg == 7) {
- __get_cpu_var(domain_shared_info) =
- (unsigned long)v->domain->shared_info;
-#if VHPT_ENABLED
- __get_cpu_var(inserted_vhpt) = __va_ul(vcpu_vhpt_maddr(v));
-#endif
- __get_cpu_var(inserted_shared_info) =
- v->domain->arch.shared_info_va;
- __get_cpu_var(inserted_mapped_regs) =
- v->domain->arch.shared_info_va +
- XMAPPEDREGS_OFS;
- ia64_new_rr7_vcpu(v, vmMangleRID(newrrv.rrval));
- } else {
- set_rr(rr,newrrv.rrval);
- }
- return 1;
-}
-
-int set_one_rr_efi(unsigned long rr, unsigned long val)
-{
- unsigned long rreg = REGION_NUMBER(rr);
- unsigned long vpd = 0UL;
-
- BUG_ON(rreg != 6 && rreg != 7);
-
- if (rreg == 6) {
- ia64_set_rr(rr, val);
- ia64_srlz_d();
- }
- else {
- if (current && VMX_DOMAIN(current))
- vpd = __get_cpu_var(inserted_vpd);
- ia64_new_rr7_efi(val, cpumask_test_cpu(smp_processor_id(),
- &percpu_set), vpd);
- }
-
- return 1;
-}
-
-void
-set_one_rr_efi_restore(unsigned long rr, unsigned long val)
-{
- unsigned long rreg = REGION_NUMBER(rr);
-
- BUG_ON(rreg != 6 && rreg != 7);
-
- if (rreg == 6) {
- ia64_set_rr(rr, val);
- ia64_srlz_d();
- } else {
- /* The firmware call is done very early, before struct vcpu
- and struct domain are initialized. */
- if (unlikely(current == NULL || current->domain == NULL ||
- is_idle_vcpu(current)))
- ia64_new_rr7_efi(val, cpumask_test_cpu(smp_processor_id(),
- &percpu_set),
- 0UL);
- else if (VMX_DOMAIN(current))
- __vmx_switch_rr7_vcpu(current, val);
- else
- ia64_new_rr7_vcpu(current, val);
- }
-}
-
-void set_virtual_rr0(void)
-{
- struct vcpu *v = current;
-
- ia64_set_rr(0, v->arch.metaphysical_saved_rr0);
- ia64_srlz_d();
-}
-
-// Set rr0 to the vcpu's metaphysical rid (metaphysical mode, so the
-// domain rid offset is not applied)
-void set_metaphysical_rr0(void)
-{
- struct vcpu *v = current;
-// ia64_rr rrv;
-
-// rrv.ve = 1; FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
- ia64_set_rr(0, v->arch.metaphysical_rid_dt);
- ia64_srlz_d();
-}
-
-void init_all_rr(struct vcpu *v)
-{
- ia64_rr rrv;
-
- rrv.rrval = 0;
- rrv.ps = v->arch.vhpt_pg_shift;
- rrv.ve = 1;
- if (!v->vcpu_info)
- panic("Stopping in init_all_rr\n");
- VCPU(v,rrs[0]) = rrv.rrval;
- VCPU(v,rrs[1]) = rrv.rrval;
- VCPU(v,rrs[2]) = rrv.rrval;
- VCPU(v,rrs[3]) = rrv.rrval;
- VCPU(v,rrs[4]) = rrv.rrval;
- VCPU(v,rrs[5]) = rrv.rrval;
- rrv.ve = 0;
- VCPU(v,rrs[6]) = rrv.rrval;
- VCPU(v,rrs[7]) = rrv.rrval;
-}
-
-
-/* XEN/ia64 INTERNAL ROUTINES */
-
-// Loads a vcpu's region register (0-7) state into the real physical
-// region registers via set_one_rr(). rr7 is special: changing it
-// requires dropping to assembly and physical mode (see ia64_new_rr7).
-// Panics the domain if any region register cannot be set.
-//
-void load_region_regs(struct vcpu *v)
-{
- unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6, rr7;
- // TODO: These probably should be validated
- unsigned long bad = 0;
-
- if (VCPU(v,metaphysical_mode)) {
- rr0 = v->domain->arch.metaphysical_rid_dt;
- ia64_set_rr(0x0000000000000000L, rr0);
- ia64_srlz_d();
- }
- else {
- rr0 = VCPU(v,rrs[0]);
- if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
- }
- rr1 = VCPU(v,rrs[1]);
- rr2 = VCPU(v,rrs[2]);
- rr3 = VCPU(v,rrs[3]);
- rr4 = VCPU(v,rrs[4]);
- rr5 = VCPU(v,rrs[5]);
- rr6 = VCPU(v,rrs[6]);
- rr7 = VCPU(v,rrs[7]);
- if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
- if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
- if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
- if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
- if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
- if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
- if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
- if (bad) {
- panic_domain(0,"load_region_regs: can't set! bad=%lx\n",bad);
- }
-}
diff --git a/xen/arch/ia64/xen/relocate_kernel.S b/xen/arch/ia64/xen/relocate_kernel.S
deleted file mode 100644
index 146ed14012..0000000000
--- a/xen/arch/ia64/xen/relocate_kernel.S
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * arch/ia64/kernel/relocate_kernel.S
- *
- * Relocate kexec'able kernel and start it
- *
- * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
- * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
- * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
- */
-#include <asm/asmmacro.h>
-#include <asm/kregs.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mca_asm.h>
-
-/* relocate_new_kernel
- *
- * Do all the unpinning here, as the hypervisor has all the relevant
- * variables and constants. Then go into the reboot_code_buffer to
- * relocate the new kernel and then branch into purgatory.
- *
- * Based on ia64_jump_to_sal
- *
- * in0: indirection_page
- * in1: start_address
- * in2: boot_param
- * in3: dom0_relocate_new_kernel
- */
-GLOBAL_ENTRY(relocate_new_kernel)
- .prologue
- alloc r31=ar.pfs,4,0,4,0
- .body
- rsm psr.i | psr.ic
-{
- flushrs
- srlz.i
-}
- movl r18=tlb_purge_done;;
- DATA_VA_TO_PA(r18);;
- mov b1=r18 // Return location
- movl r18=ia64_do_tlb_purge;;
- DATA_VA_TO_PA(r18);;
- mov b2=r18 // doing tlb_flush work
- mov ar.rsc=0 // Put RSE in enforced lazy, LE mode
- movl r17=1f;;
- DATA_VA_TO_PA(r17);;
- mov cr.iip=r17
- movl r16=IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC;;
- mov cr.ipsr=r16
- mov cr.ifs=r0;;
- rfi;;
-1:
- /* Invalidate all TLB data/inst */
- br.sptk.many b2;; // jump to tlb purge code
-
-tlb_purge_done:
- mov out0=in0 // out3 is ignored and thus can be garbage
- mov out1=in1
- mov out2=in2
- mov b1=in3
- ;;
- br.sptk.many b1;; // jump to dom0-supplied relocate_new_kernel
-
- /* We should never get here */
-END(relocate_new_kernel)
-
-GLOBAL_ENTRY(ia64_dump_cpu_regs)
- .prologue
- alloc loc0=ar.pfs,1,2,0,0
- .body
- mov ar.rsc=0 // put RSE in enforced lazy mode
- add loc1=4*8, in0 // save r4 and r5 first
- ;;
-{
- flushrs // flush dirty regs to backing store
- srlz.i
-}
- st8 [loc1]=r4, 8
- ;;
- st8 [loc1]=r5, 8
- ;;
- add loc1=32*8, in0
- mov r4=ar.rnat
- ;;
- st8 [in0]=r0, 8 // r0
- st8 [loc1]=r4, 8 // rnat
- mov r5=pr
- ;;
- st8 [in0]=r1, 8 // r1
- st8 [loc1]=r5, 8 // pr
- mov r4=b0
- ;;
- st8 [in0]=r2, 8 // r2
- st8 [loc1]=r4, 8 // b0
- mov r5=b1
- ;;
- st8 [in0]=r3, 24 // r3
- st8 [loc1]=r5, 8 // b1
- mov r4=b2
- ;;
- st8 [in0]=r6, 8 // r6
- st8 [loc1]=r4, 8 // b2
- mov r5=b3
- ;;
- st8 [in0]=r7, 8 // r7
- st8 [loc1]=r5, 8 // b3
- mov r4=b4
- ;;
- st8 [in0]=r8, 8 // r8
- st8 [loc1]=r4, 8 // b4
- mov r5=b5
- ;;
- st8 [in0]=r9, 8 // r9
- st8 [loc1]=r5, 8 // b5
- mov r4=b6
- ;;
- st8 [in0]=r10, 8 // r10
- st8 [loc1]=r4, 8 // b6
- mov r5=b7
- ;;
- st8 [in0]=r11, 8 // r11
- st8 [loc1]=r5, 8 // b7
- mov r4=b0
- ;;
- st8 [in0]=r12, 8 // r12
- st8 [loc1]=r4, 8 // ip
- mov r5=loc0
- ;;
- st8 [in0]=r13, 8 // r13
- extr.u r5=r5, 0, 38 // ar.pfs.pfm
- mov r4=r0 // user mask
- ;;
- st8 [in0]=r14, 8 // r14
- st8 [loc1]=r5, 8 // cfm
- ;;
- st8 [in0]=r15, 8 // r15
- st8 [loc1]=r4, 8 // user mask
- mov r5=ar.rsc
- ;;
- st8 [in0]=r16, 8 // r16
- st8 [loc1]=r5, 8 // ar.rsc
- mov r4=ar.bsp
- ;;
- st8 [in0]=r17, 8 // r17
- st8 [loc1]=r4, 8 // ar.bsp
- mov r5=ar.bspstore
- ;;
- st8 [in0]=r18, 8 // r18
- st8 [loc1]=r5, 8 // ar.bspstore
- mov r4=ar.rnat
- ;;
- st8 [in0]=r19, 8 // r19
- st8 [loc1]=r4, 8 // ar.rnat
- mov r5=ar.ccv
- ;;
- st8 [in0]=r20, 8 // r20
- st8 [loc1]=r5, 8 // ar.ccv
- mov r4=ar.unat
- ;;
- st8 [in0]=r21, 8 // r21
- st8 [loc1]=r4, 8 // ar.unat
- mov r5 = ar.fpsr
- ;;
- st8 [in0]=r22, 8 // r22
- st8 [loc1]=r5, 8 // ar.fpsr
- mov r4 = ar.unat
- ;;
- st8 [in0]=r23, 8 // r23
- st8 [loc1]=r4, 8 // unat
- mov r5 = ar.fpsr
- ;;
- st8 [in0]=r24, 8 // r24
- st8 [loc1]=r5, 8 // fpsr
- mov r4 = ar.pfs
- ;;
- st8 [in0]=r25, 8 // r25
- st8 [loc1]=r4, 8 // ar.pfs
- mov r5 = ar.lc
- ;;
- st8 [in0]=r26, 8 // r26
- st8 [loc1]=r5, 8 // ar.lc
- mov r4 = ar.ec
- ;;
- st8 [in0]=r27, 8 // r27
- st8 [loc1]=r4, 8 // ar.ec
- mov r5 = ar.csd
- ;;
- st8 [in0]=r28, 8 // r28
- st8 [loc1]=r5, 8 // ar.csd
- mov r4 = ar.ssd
- ;;
- st8 [in0]=r29, 8 // r29
- st8 [loc1]=r4, 8 // ar.ssd
- ;;
- st8 [in0]=r30, 8 // r30
- ;;
- st8 [in0]=r31, 8 // r31
- mov ar.pfs=loc0
- ;;
- br.ret.sptk.many rp
-END(ia64_dump_cpu_regs)
-
-
diff --git a/xen/arch/ia64/xen/sn_console.c b/xen/arch/ia64/xen/sn_console.c
deleted file mode 100644
index ddd6c79dca..0000000000
--- a/xen/arch/ia64/xen/sn_console.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * C-Brick Serial Port (and console) driver for SGI Altix machines.
- *
- * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-#include <xen/lib.h>
-#include <asm/acpi.h>
-#include <asm/sn/sn_sal.h>
-#include <xen/serial.h>
-#include <xen/sched.h>
-
-struct sn_console_data {
- struct timer timer;
- unsigned int timeout_ms;
- int booted;
-};
-
-static struct sn_console_data console_data = {
- .timeout_ms = 8 * 16 * 1000 / 9600,
-};
-
-
-/*
- * sn_putc - Send a character to the console, polled or interrupt mode
- */
-static void sn_putc(struct serial_port *port, char c)
-{
- struct sn_console_data *sndata = port->uart;
-
- if (sndata->booted)
- ia64_sn_console_putb(&c, 1);
- else
- ia64_sn_console_putc(c);
-}
-
-/*
- * sn_getc - Get a character from the console, polled or interrupt mode
- */
-static int sn_getc(struct serial_port *port, char *pc)
-{
- int ch;
-
- ia64_sn_console_getc(&ch);
- *pc = ch & 0xff;
- return 1;
-}
-
-static void __init sn_endboot(struct serial_port *port)
-{
- struct sn_console_data *sndata = port->uart;
-
- sndata->booted = 1;
-}
-
-
-static void sn_poll(void *data)
-{
- int ch, status;
- struct serial_port *port = data;
- struct sn_console_data *sndata = port->uart;
- struct cpu_user_regs *regs = guest_cpu_user_regs();
-
- status = ia64_sn_console_check(&ch);
- if (!status && ch) {
- serial_rx_interrupt(port, regs);
- }
- set_timer(&sndata->timer, NOW() + MILLISECS(sndata->timeout_ms));
-}
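
Since the SAL console has no receive interrupt, input is emulated by a periodic timer: each tick polls for a pending character, delivers it, and re-arms the timer. The shape of that pattern, abstracted into a standalone sketch (the struct, stubs, and names here are hypothetical; in the driver above, ia64_sn_console_check() is the check, serial_rx_interrupt() the delivery, and set_timer() the re-arm):

    #include <stdio.h>

    struct poll_console {
        int (*check)(int *ch);   /* returns 0 when a character is pending */
        void (*deliver)(int ch);
        unsigned timeout_ms;     /* ~8*16*1000/9600: drain time of a
                                  * 16-character FIFO at 9600 baud */
    };

    static int fake_check(int *ch)   { *ch = 'x'; return 0; }
    static void fake_deliver(int ch) { printf("got '%c'\n", ch); }

    static void poll_tick(struct poll_console *c)
    {
        int ch;
        if (c->check(&ch) == 0 && ch)
            c->deliver(ch);
        /* real driver: set_timer(&timer, NOW() + MILLISECS(c->timeout_ms)); */
    }

    int main(void)
    {
        struct poll_console c = { fake_check, fake_deliver, 13 };
        poll_tick(&c);
        return 0;
    }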
-
-
-static void __init sn_init_postirq(struct serial_port *port)
-{
- struct sn_console_data *sndata = port->uart;
-
- init_timer(&sndata->timer, sn_poll, port, 0);
- set_timer(&sndata->timer, NOW() + MILLISECS(console_data.timeout_ms));
-}
-
-static void sn_resume(struct serial_port *port)
-{
- struct sn_console_data *sndata = port->uart;
-
- set_timer(&sndata->timer, NOW() + MILLISECS(console_data.timeout_ms));
-}
-
-static struct uart_driver sn_sal_console = {
- .init_postirq = sn_init_postirq,
- .resume = sn_resume,
- .putc = sn_putc,
- .getc = sn_getc,
- .endboot = sn_endboot,
-};
-
-
-/**
- * early_sn_setup - early setup routine for SN platforms
- *
- * pulled from arch/ia64/sn/kernel/setup.c
- */
-static void __init early_sn_setup(void)
-{
- efi_system_table_t *efi_systab;
- efi_config_table_t *config_tables;
- struct ia64_sal_systab *sal_systab;
- struct ia64_sal_desc_entry_point *ep;
- char *p;
- int i, j;
-
- /*
- * Parse enough of the SAL tables to locate the SAL entry point. Since console
- * I/O on SN2 is done via SAL calls, early_printk won't work without this.
- *
- * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
- * Any changes to those files may have to be made here as well.
- */
- efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
- config_tables = __va(efi_systab->tables);
- for (i = 0; i < efi_systab->nr_tables; i++) {
- if (!efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID)) {
- sal_systab = __va(config_tables[i].table);
- p = (char *)(sal_systab + 1);
- for (j = 0; j < sal_systab->entry_count; j++) {
- if (*p == SAL_DESC_ENTRY_POINT) {
- ep = (struct ia64_sal_desc_entry_point
- *)p;
- ia64_sal_handler_init(__va
- (ep->sal_proc),
- __va(ep->gp));
- return;
- }
- p += SAL_DESC_SIZE(*p);
- }
- }
- }
- /* Uh-oh, SAL not available?? */
- printk(KERN_ERR "failed to find SAL entry point\n");
-}
-
-/**
- * sn_serial_console_early_setup - Sets up early console output support
- *
- * pulled from drivers/serial/sn_console.c
- */
-int __init sn_serial_console_early_setup(void)
-{
- if (strcmp("sn2", acpi_get_sysname()))
- return -1;
-
- early_sn_setup(); /* Find SAL entry points */
- serial_register_uart(0, &sn_sal_console, &console_data);
-
- return 0;
-}
diff --git a/xen/arch/ia64/xen/tlb_track.c b/xen/arch/ia64/xen/tlb_track.c
deleted file mode 100644
index ec1d7b37f7..0000000000
--- a/xen/arch/ia64/xen/tlb_track.c
+++ /dev/null
@@ -1,533 +0,0 @@
-/******************************************************************************
- * tlb_track.c
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <asm/tlb_track.h>
-#include <asm/p2m_entry.h>
-#include <xen/grant_table.h>
-#include <asm/vmx_mm_def.h> /* for IA64_RR_SHIFT */
-#include <asm/vmx_vcpu.h> /* for VRN7 */
-#include <asm/vcpu.h> /* for PSCB() */
-
-#define CONFIG_TLB_TRACK_DEBUG
-#ifdef CONFIG_TLB_TRACK_DEBUG
-# define tlb_track_printd(fmt, ...) \
- dprintk(XENLOG_DEBUG, fmt, ##__VA_ARGS__)
-#else
-# define tlb_track_printd(fmt, ...) do { } while (0)
-#endif
-
-static int
-tlb_track_allocate_entries(struct tlb_track* tlb_track)
-{
- struct page_info* entry_page;
- struct tlb_track_entry* track_entries;
- unsigned int allocated;
- unsigned long i;
-
- BUG_ON(tlb_track->num_free > 0);
- if (tlb_track->num_entries >= tlb_track->limit) {
- dprintk(XENLOG_WARNING, "%s: num_entries %d limit %d\n",
- __func__, tlb_track->num_entries, tlb_track->limit);
- return -ENOMEM;
- }
- entry_page = alloc_domheap_page(NULL, 0);
- if (entry_page == NULL) {
- dprintk(XENLOG_WARNING,
- "%s: domheap page failed. num_entries %d limit %d\n",
- __func__, tlb_track->num_entries, tlb_track->limit);
- return -ENOMEM;
- }
-
- page_list_add(entry_page, &tlb_track->page_list);
- track_entries = (struct tlb_track_entry*)page_to_virt(entry_page);
- allocated = PAGE_SIZE / sizeof(track_entries[0]);
- tlb_track->num_entries += allocated;
- tlb_track->num_free += allocated;
- for (i = 0; i < allocated; i++) {
- list_add(&track_entries[i].list, &tlb_track->free_list);
- // tlb_track_printd("track_entries[%ld] 0x%p\n", i, &track_entries[i]);
- }
- tlb_track_printd("allocated %d num_entries %d num_free %d\n",
- allocated, tlb_track->num_entries, tlb_track->num_free);
- return 0;
-}
-
-
-int
-tlb_track_create(struct domain* d)
-{
- struct tlb_track* tlb_track = NULL;
- struct page_info* hash_page = NULL;
- unsigned int hash_size;
- unsigned int hash_shift;
- unsigned int i;
-
- tlb_track = xmalloc(struct tlb_track);
- if (tlb_track == NULL)
- goto out;
-
- hash_page = alloc_domheap_page(NULL, 0);
- if (hash_page == NULL)
- goto out;
-
- spin_lock_init(&tlb_track->free_list_lock);
- INIT_LIST_HEAD(&tlb_track->free_list);
- tlb_track->limit = TLB_TRACK_LIMIT_ENTRIES;
- tlb_track->num_entries = 0;
- tlb_track->num_free = 0;
- INIT_PAGE_LIST_HEAD(&tlb_track->page_list);
- if (tlb_track_allocate_entries(tlb_track) < 0)
- goto out;
-
- spin_lock_init(&tlb_track->hash_lock);
- /* XXX hash size optimization */
- hash_size = PAGE_SIZE / sizeof(tlb_track->hash[0]);
- for (hash_shift = 0; (1 << (hash_shift + 1)) < hash_size; hash_shift++)
- /* nothing */;
- tlb_track->hash_size = (1 << hash_shift);
- tlb_track->hash_shift = hash_shift;
- tlb_track->hash_mask = (1 << hash_shift) - 1;
- tlb_track->hash = page_to_virt(hash_page);
- for (i = 0; i < tlb_track->hash_size; i++)
- INIT_LIST_HEAD(&tlb_track->hash[i]);
-
- smp_mb(); /* make initialization visible before use. */
- d->arch.tlb_track = tlb_track;
- dprintk(XENLOG_DEBUG, "hash 0x%p hash_size %d\n",
- tlb_track->hash, tlb_track->hash_size);
-
- return 0;
-
-out:
- if (hash_page != NULL)
- free_domheap_page(hash_page);
-
- if (tlb_track != NULL)
- xfree(tlb_track);
-
- return -ENOMEM;
-}
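
The hash_shift loop in tlb_track_create() picks the largest power of two strictly below the number of list heads that fit in one page. A standalone sketch of that computation; the 16 KB page and 16-byte list head are illustrative assumptions, not fixed values:

    #include <stdio.h>

    /* Largest power-of-two hash size strictly smaller than the raw
     * capacity, exactly as the loop in tlb_track_create() computes it. */
    static unsigned pick_hash_shift(unsigned capacity)
    {
        unsigned shift;
        for (shift = 0; (1u << (shift + 1)) < capacity; shift++)
            /* nothing */;
        return shift;
    }

    int main(void)
    {
        unsigned capacity = 16384 / 16;   /* assumed page / list-head sizes */
        unsigned shift = pick_hash_shift(capacity);
        printf("hash_size = %u (shift %u, mask 0x%x)\n",
               1u << shift, shift, (1u << shift) - 1); /* 512, 9, 0x1ff */
        return 0;
    }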
-
-void
-tlb_track_destroy(struct domain* d)
-{
- struct tlb_track* tlb_track = d->arch.tlb_track;
- struct page_info* page;
- struct page_info* next;
-
- spin_lock(&tlb_track->free_list_lock);
- BUG_ON(tlb_track->num_free != tlb_track->num_entries);
-
- page_list_for_each_safe(page, next, &tlb_track->page_list) {
- page_list_del(page, &tlb_track->page_list);
- free_domheap_page(page);
- }
-
- free_domheap_page(virt_to_page(tlb_track->hash));
- xfree(tlb_track);
- // d->tlb_track = NULL;
-}
-
-static struct tlb_track_entry*
-tlb_track_get_entry(struct tlb_track* tlb_track)
-{
- struct tlb_track_entry* entry = NULL;
- spin_lock(&tlb_track->free_list_lock);
- if (tlb_track->num_free == 0)
- (void)tlb_track_allocate_entries(tlb_track);
-
- if (tlb_track->num_free > 0) {
- BUG_ON(list_empty(&tlb_track->free_list));
- entry = list_entry(tlb_track->free_list.next,
- struct tlb_track_entry, list);
- tlb_track->num_free--;
- list_del(&entry->list);
- }
- spin_unlock(&tlb_track->free_list_lock);
- return entry;
-}
-
-void
-tlb_track_free_entry(struct tlb_track* tlb_track,
- struct tlb_track_entry* entry)
-{
- spin_lock(&tlb_track->free_list_lock);
- list_add(&entry->list, &tlb_track->free_list);
- tlb_track->num_free++;
- spin_unlock(&tlb_track->free_list_lock);
-}
-
-
-#include <linux/hash.h>
-/* XXX hash function. */
-static struct list_head*
-tlb_track_hash_head(struct tlb_track* tlb_track, volatile pte_t* ptep)
-{
- unsigned long hash = hash_long((unsigned long)ptep, tlb_track->hash_shift);
- BUG_ON(hash >= tlb_track->hash_size);
- BUG_ON((hash & tlb_track->hash_mask) != hash);
- return &tlb_track->hash[hash];
-}
-
-static int
-tlb_track_pte_zapped(pte_t old_pte, pte_t ret_pte)
-{
- if (pte_pfn(old_pte) != pte_pfn(ret_pte) ||
- (pte_val(old_pte) & ~(_PFN_MASK | _PAGE_TLB_TRACK_MASK)) !=
- (pte_val(ret_pte) & ~(_PFN_MASK | _PAGE_TLB_TRACK_MASK))) {
- /* Other thread zapped the p2m entry. */
- return 1;
- }
- return 0;
-}
-
-static TLB_TRACK_RET_T
-tlb_track_insert_or_dirty(struct tlb_track* tlb_track, struct mm_struct* mm,
- volatile pte_t* ptep, pte_t old_pte,
- unsigned long vaddr, unsigned long rid)
-{
- unsigned long mfn = pte_pfn(old_pte);
- struct list_head* head = tlb_track_hash_head(tlb_track, ptep);
- struct tlb_track_entry* entry;
- struct tlb_track_entry* new_entry = NULL;
- unsigned long bit_to_be_set = _PAGE_TLB_INSERTED;
- pte_t new_pte;
- pte_t ret_pte;
-
- struct vcpu* v = current;
- TLB_TRACK_RET_T ret = TLB_TRACK_NOT_FOUND;
-
-#if 0 /* this is done at vcpu_tlb_track_insert_or_dirty() */
- perfc_incr(tlb_track_iod);
- if (!pte_tlb_tracking(old_pte)) {
- perfc_incr(tlb_track_iod_not_tracked);
- return TLB_TRACK_NOT_TRACKED;
- }
-#endif
- if (pte_tlb_inserted_many(old_pte)) {
- perfc_incr(tlb_track_iod_tracked_many);
- return TLB_TRACK_MANY;
- }
-
- /* vaddr must be normalized so that it is in vrn7 and page aligned. */
- BUG_ON((vaddr >> IA64_RR_SHIFT) != VRN7);
- BUG_ON((vaddr & ~PAGE_MASK) != 0);
-#if 0
- tlb_track_printd("\n"
- "\tmfn 0x%016lx\n"
- "\told_pte 0x%016lx ptep 0x%p\n"
- "\tptep_val 0x%016lx vaddr 0x%016lx rid %ld\n"
- "\ttlb_track 0x%p head 0x%p\n",
- mfn,
- pte_val(old_pte), ptep, pte_val(*ptep),
- vaddr, rid,
- tlb_track, head);
-#endif
-
- again:
- /*
- * The zapping side may zap the p2m entry and then remove the tlb
- * track entry non-atomically, so we may see a stale tlb track
- * entry here; p2m_entry_retry() handles that case. Alternatively,
- * another thread may zap the p2m entry, remove the tlb track
- * entry and insert a new one.
- */
- spin_lock(&tlb_track->hash_lock);
- list_for_each_entry(entry, head, list) {
- if (entry->ptep != ptep)
- continue;
-
- if (pte_pfn(entry->pte_val) == mfn) {
- // tlb_track_entry_printf(entry);
- if (entry->vaddr == vaddr && entry->rid == rid) {
- // tlb_track_printd("TLB_TRACK_FOUND\n");
- ret = TLB_TRACK_FOUND;
- perfc_incr(tlb_track_iod_found);
-#ifdef CONFIG_TLB_TRACK_CNT
- entry->cnt++;
- if (entry->cnt > TLB_TRACK_CNT_FORCE_MANY) {
- /*
- * Heuristics:
- * If a page is used to transfer data over a device
- * channel, it is typically unmapped after only a few
- * accesses (one or two tlb inserts) once the real
- * device I/O completes, i.e. within a short period.
- * This page, however, has been accessed many times,
- * so we guess it is used as an I/O ring and tracking
- * this entry is probably useless.
- */
- // tlb_track_entry_printf(entry);
- // tlb_track_printd("cnt = %ld\n", entry->cnt);
- perfc_incr(tlb_track_iod_force_many);
- goto force_many;
- }
-#endif
- goto found;
- } else {
-#ifdef CONFIG_TLB_TRACK_CNT
- force_many:
-#endif
- if (!pte_tlb_inserted(old_pte)) {
- printk("%s:%d racy update\n", __func__, __LINE__);
- old_pte = __pte(pte_val(old_pte) | _PAGE_TLB_INSERTED);
- }
- new_pte = __pte(pte_val(old_pte) | _PAGE_TLB_INSERTED_MANY);
- ret_pte = ptep_cmpxchg_rel(mm, vaddr, ptep, old_pte, new_pte);
- if (pte_val(ret_pte) != pte_val(old_pte)) {
- // tlb_track_printd("TLB_TRACK_AGAIN\n");
- ret = TLB_TRACK_AGAIN;
- perfc_incr(tlb_track_iod_again);
- } else {
- // tlb_track_printd("TLB_TRACK_MANY del entry 0x%p\n",
- // entry);
- ret = TLB_TRACK_MANY;
- list_del(&entry->list);
- // tlb_track_entry_printf(entry);
- perfc_incr(tlb_track_iod_tracked_many_del);
- }
- goto out;
- }
- }
-
- /*
- * Another thread changed the p2m entry, removed the tlb track
- * entry and inserted a new one after we read old_pte but before
- * we took the spin lock.
- */
- // tlb_track_printd("TLB_TRACK_AGAIN\n");
- ret = TLB_TRACK_AGAIN;
- perfc_incr(tlb_track_iod_again);
- goto out;
- }
-
- entry = NULL; // prevent freeing entry.
- if (pte_tlb_inserted(old_pte)) {
- /* Another thread removed the tlb_track_entry after we read
- old_pte but before we took the spin lock. */
- ret = TLB_TRACK_AGAIN;
- perfc_incr(tlb_track_iod_again);
- goto out;
- }
- if (new_entry == NULL && bit_to_be_set == _PAGE_TLB_INSERTED) {
- spin_unlock(&tlb_track->hash_lock);
- new_entry = tlb_track_get_entry(tlb_track);
- if (new_entry == NULL) {
- tlb_track_printd("get_entry failed\n");
- /* An entry can't be allocated;
- fall back to full-flush mode. */
- bit_to_be_set |= _PAGE_TLB_INSERTED_MANY;
- perfc_incr(tlb_track_iod_new_failed);
- }
- // tlb_track_printd("new_entry 0x%p\n", new_entry);
- perfc_incr(tlb_track_iod_new_entry);
- goto again;
- }
-
- BUG_ON(pte_tlb_inserted_many(old_pte));
- new_pte = __pte(pte_val(old_pte) | bit_to_be_set);
- ret_pte = ptep_cmpxchg_rel(mm, vaddr, ptep, old_pte, new_pte);
- if (pte_val(old_pte) != pte_val(ret_pte)) {
- if (tlb_track_pte_zapped(old_pte, ret_pte)) {
- // tlb_track_printd("zapped TLB_TRACK_AGAIN\n");
- ret = TLB_TRACK_AGAIN;
- perfc_incr(tlb_track_iod_again);
- goto out;
- }
-
- /* Other thread set _PAGE_TLB_INSERTED and/or _PAGE_TLB_INSERTED_MANY */
- if (pte_tlb_inserted_many(ret_pte)) {
- /* Other thread already set _PAGE_TLB_INSERTED_MANY and
- removed the entry. */
- // tlb_track_printd("iserted TLB_TRACK_MANY\n");
- BUG_ON(!pte_tlb_inserted(ret_pte));
- ret = TLB_TRACK_MANY;
- perfc_incr(tlb_track_iod_new_many);
- goto out;
- }
- BUG_ON(pte_tlb_inserted(ret_pte));
- BUG();
- }
- if (new_entry) {
- // tlb_track_printd("iserting new_entry 0x%p\n", new_entry);
- entry = new_entry;
- new_entry = NULL;
-
- entry->ptep = ptep;
- entry->pte_val = old_pte;
- entry->vaddr = vaddr;
- entry->rid = rid;
- cpumask_clear(&entry->pcpu_dirty_mask);
- vcpus_clear(entry->vcpu_dirty_mask);
- list_add(&entry->list, head);
-
-#ifdef CONFIG_TLB_TRACK_CNT
- entry->cnt = 0;
-#endif
- perfc_incr(tlb_track_iod_insert);
- // tlb_track_entry_printf(entry);
- } else {
- goto out;
- }
-
- found:
- BUG_ON(v->processor >= NR_CPUS);
- cpumask_set_cpu(v->processor, &entry->pcpu_dirty_mask);
- BUG_ON(v->vcpu_id >= NR_CPUS);
- vcpu_set(v->vcpu_id, entry->vcpu_dirty_mask);
- perfc_incr(tlb_track_iod_dirtied);
-
- out:
- spin_unlock(&tlb_track->hash_lock);
- if (ret == TLB_TRACK_MANY && entry != NULL)
- tlb_track_free_entry(tlb_track, entry);
- if (new_entry != NULL)
- tlb_track_free_entry(tlb_track, new_entry);
- return ret;
-}
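
tlb_track_insert_or_dirty() serializes against concurrent p2m updates by cmpxchg'ing the tracking bits into the pte itself: the write only commits if the pte still equals the value read earlier, and any mismatch sends the caller back to re-examine the entry. A minimal sketch of that pattern using C11 atomics; the real code uses ptep_cmpxchg_rel, and the bit positions here are stand-ins, not the actual _PAGE_TLB_* values:

    #include <stdatomic.h>
    #include <stdio.h>

    #define PTE_TLB_INSERTED       (1ul << 60)   /* stand-ins for the */
    #define PTE_TLB_INSERTED_MANY  (1ul << 61)   /* _PAGE_TLB_* bits  */

    /* Try to mark a pte as "inserted once". Returns 1 on success, 0 when
     * another thread changed the pte first and the caller must retry. */
    static int mark_inserted(_Atomic unsigned long *ptep, unsigned long old_pte)
    {
        unsigned long new_pte = old_pte | PTE_TLB_INSERTED;
        return atomic_compare_exchange_strong(ptep, &old_pte, new_pte);
    }

    int main(void)
    {
        _Atomic unsigned long pte = 0x1000;
        unsigned long snapshot = 0x1000;
        printf("first attempt: %d\n", mark_inserted(&pte, snapshot)); /* 1 */
        printf("stale attempt: %d\n", mark_inserted(&pte, snapshot)); /* 0 */
        return 0;
    }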
-
-void
-__vcpu_tlb_track_insert_or_dirty(struct vcpu *vcpu, unsigned long vaddr,
- struct p2m_entry* entry)
-{
- unsigned long vrn = vaddr >> IA64_RR_SHIFT;
- unsigned long rid = PSCB(vcpu, rrs[vrn]);
- TLB_TRACK_RET_T ret;
-
- /* Normalize to vrn7.
- For Linux dom0, vrn7 is the most common case. */
- vaddr |= VRN7 << VRN_SHIFT;
- vaddr &= PAGE_MASK;
- ret = tlb_track_insert_or_dirty(vcpu->domain->arch.tlb_track,
- &vcpu->domain->arch.mm,
- entry->ptep, entry->used,
- vaddr, rid);
- if (ret == TLB_TRACK_AGAIN)
- p2m_entry_set_retry(entry);
-}
-
-TLB_TRACK_RET_T
-tlb_track_search_and_remove(struct tlb_track* tlb_track,
- volatile pte_t* ptep, pte_t old_pte,
- struct tlb_track_entry** entryp)
-{
- unsigned long mfn = pte_pfn(old_pte);
- struct list_head* head = tlb_track_hash_head(tlb_track, ptep);
- struct tlb_track_entry* entry;
-
- perfc_incr(tlb_track_sar);
- if (!pte_tlb_tracking(old_pte)) {
- perfc_incr(tlb_track_sar_not_tracked);
- return TLB_TRACK_NOT_TRACKED;
- }
- if (!pte_tlb_inserted(old_pte)) {
- BUG_ON(pte_tlb_inserted_many(old_pte));
- perfc_incr(tlb_track_sar_not_found);
- return TLB_TRACK_NOT_FOUND;
- }
- if (pte_tlb_inserted_many(old_pte)) {
- BUG_ON(!pte_tlb_inserted(old_pte));
- perfc_incr(tlb_track_sar_many);
- return TLB_TRACK_MANY;
- }
-
- spin_lock(&tlb_track->hash_lock);
- list_for_each_entry(entry, head, list) {
- if (entry->ptep != ptep)
- continue;
-
- if (pte_pfn(entry->pte_val) == mfn) {
- /*
- * PARANOIA
- * We're here after zapping the p2m entry. However, another pCPU
- * may in theory update the same p2m entry with the same mfn at
- * the same time, in which case we can't determine whether this
- * entry is ours or belongs to the racy p2m update. Such racy
- * behaviour by a guest domain doesn't make sense, but it is
- * allowed, so take the very pessimistic path: leave this entry
- * to be found later and do a full flush this time.
- *
- * NOTE: Updating the tlb tracking hash is protected by a spin
- * lock, and setting the _PAGE_TLB_INSERTED and
- * _PAGE_TLB_INSERTED_MANY bits is serialized by the same lock.
- * See tlb_track_insert_or_dirty().
- */
- pte_t current_pte = *ptep;
- if (unlikely(pte_pfn(current_pte) == mfn &&
- pte_tlb_tracking(current_pte) &&
- pte_tlb_inserted(current_pte))) {
- BUG_ON(pte_tlb_inserted_many(current_pte));
- spin_unlock(&tlb_track->hash_lock);
- perfc_incr(tlb_track_sar_many);
- return TLB_TRACK_MANY;
- }
-
- list_del(&entry->list);
- spin_unlock(&tlb_track->hash_lock);
- *entryp = entry;
- perfc_incr(tlb_track_sar_found);
- // tlb_track_entry_printf(entry);
-#ifdef CONFIG_TLB_TRACK_CNT
- // tlb_track_printd("cnt = %ld\n", entry->cnt);
-#endif
- return TLB_TRACK_FOUND;
- }
- BUG();
- }
- BUG();
- spin_unlock(&tlb_track->hash_lock);
- return TLB_TRACK_NOT_TRACKED;
-}
-
-/* for debug */
-void
-__tlb_track_entry_printf(const char* func, int line,
- const struct tlb_track_entry* entry)
-{
- char pcpumask_buf[NR_CPUS + 1];
- char vcpumask_buf[MAX_VIRT_CPUS + 1];
- cpumask_scnprintf(pcpumask_buf, sizeof(pcpumask_buf),
- &entry->pcpu_dirty_mask);
- vcpumask_scnprintf(vcpumask_buf, sizeof(vcpumask_buf),
- entry->vcpu_dirty_mask);
- printk("%s:%d\n"
- "\tmfn 0x%016lx\n"
- "\told_pte 0x%016lx ptep 0x%p\n"
- "\tpte_val 0x%016lx vaddr 0x%016lx rid %ld\n"
- "\tpcpu_dirty_mask %s vcpu_dirty_mask %s\n"
- "\tentry 0x%p\n",
- func, line,
- pte_pfn(entry->pte_val),
- pte_val(entry->pte_val), entry->ptep, pte_val(*entry->ptep),
- entry->vaddr, entry->rid,
- pcpumask_buf, vcpumask_buf,
- entry);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/ia64/xen/vcpu.c b/xen/arch/ia64/xen/vcpu.c
deleted file mode 100644
index 40e3821e86..0000000000
--- a/xen/arch/ia64/xen/vcpu.c
+++ /dev/null
@@ -1,2320 +0,0 @@
-/*
- * Virtualized CPU functions
- *
- * Copyright (C) 2004-2005 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- */
-
-#include <linux/sched.h>
-#include <public/xen.h>
-#include <xen/mm.h>
-#include <asm/ia64_int.h>
-#include <asm/vcpu.h>
-#include <asm/regionreg.h>
-#include <asm/tlb.h>
-#include <asm/processor.h>
-#include <asm/delay.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/vhpt.h>
-#include <asm/tlbflush.h>
-#include <asm/privop.h>
-#include <xen/event.h>
-#include <asm/vmx_phy_mode.h>
-#include <asm/bundle.h>
-#include <asm/privop_stat.h>
-#include <asm/uaccess.h>
-#include <asm/p2m_entry.h>
-#include <asm/tlb_track.h>
-
-/* FIXME: where should these declarations be? */
-extern void getreg(unsigned long regnum, unsigned long *val, int *nat,
- struct pt_regs *regs);
-extern void setreg(unsigned long regnum, unsigned long val, int nat,
- struct pt_regs *regs);
-extern void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
- struct pt_regs *regs);
-
-extern void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
- struct pt_regs *regs);
-
-typedef union {
- struct ia64_psr ia64_psr;
- unsigned long i64;
-} PSR;
-
-// this def for vcpu_regs won't work if kernel stack is present
-//#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
-
-#define IA64_PTA_SZ_BIT 2
-#define IA64_PTA_VF_BIT 8
-#define IA64_PTA_BASE_BIT 15
-#define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
-
-#define IA64_PSR_NON_VIRT_BITS \
- (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | \
- IA64_PSR_MFL| IA64_PSR_MFH| IA64_PSR_PK | \
- IA64_PSR_DFL| IA64_PSR_SP | IA64_PSR_DB | \
- IA64_PSR_LP | IA64_PSR_TB | IA64_PSR_ID | \
- IA64_PSR_DA | IA64_PSR_DD | IA64_PSR_SS | \
- IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
-
-unsigned long vcpu_verbose = 0;
-
-/**************************************************************************
- VCPU general register access routines
-**************************************************************************/
-#ifdef XEN
-u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg)
-{
- REGS *regs = vcpu_regs(vcpu);
- u64 val;
-
- if (!reg)
- return 0;
- getreg(reg, &val, 0, regs); // FIXME: handle NATs later
- return val;
-}
-
-IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val)
-{
- REGS *regs = vcpu_regs(vcpu);
- int nat;
-
- getreg(reg, val, &nat, regs); // FIXME: handle NATs later
- if (nat)
- return IA64_NAT_CONSUMPTION_VECTOR;
- return 0;
-}
-
-// returns:
-// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
-// IA64_NO_FAULT otherwise
-IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value, int nat)
-{
- REGS *regs = vcpu_regs(vcpu);
- long sof = (regs->cr_ifs) & 0x7f;
-
- if (!reg)
- return IA64_ILLOP_FAULT;
- if (reg >= sof + 32)
- return IA64_ILLOP_FAULT;
- setreg(reg, value, nat, regs); // FIXME: handle NATs later
- return IA64_NO_FAULT;
-}
-
-IA64FAULT
-vcpu_get_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
-{
- REGS *regs = vcpu_regs(vcpu);
- getfpreg(reg, val, regs); // FIXME: handle NATs later
- return IA64_NO_FAULT;
-}
-
-IA64FAULT
-vcpu_set_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
-{
- REGS *regs = vcpu_regs(vcpu);
- if (reg > 1)
- setfpreg(reg, val, regs); // FIXME: handle NATs later
- return IA64_NO_FAULT;
-}
-
-#else
-// returns:
-// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
-// IA64_NO_FAULT otherwise
-IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value)
-{
- REGS *regs = vcpu_regs(vcpu);
- long sof = (regs->cr_ifs) & 0x7f;
-
- if (!reg)
- return IA64_ILLOP_FAULT;
- if (reg >= sof + 32)
- return IA64_ILLOP_FAULT;
- setreg(reg, value, 0, regs); // FIXME: handle NATs later
- return IA64_NO_FAULT;
-}
-
-#endif
-
-void vcpu_init_regs(struct vcpu *v)
-{
- struct pt_regs *regs;
-
- regs = vcpu_regs(v);
- if (VMX_DOMAIN(v)) {
- /* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
- regs->cr_ipsr = IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT |
- IA64_PSR_I | IA64_PSR_IC | IA64_PSR_SI |
- IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_VM;
- /* lazy fp */
- FP_PSR(v) = IA64_PSR_DFH;
- regs->cr_ipsr |= IA64_PSR_DFH;
- } else {
- regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
- | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
- regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
- | IA64_PSR_RI | IA64_PSR_IS);
- // domain runs at PL2
- regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,IA64_PSR_CPL0_BIT);
- // lazy fp
- PSCB(v, hpsr_dfh) = 1;
- PSCB(v, hpsr_mfh) = 0;
- regs->cr_ipsr |= IA64_PSR_DFH;
- }
- regs->cr_ifs = 1UL << 63; /* or clear? */
- regs->ar_fpsr = FPSR_DEFAULT;
-
- if (VMX_DOMAIN(v)) {
- vmx_init_all_rr(v);
- /* Virtual processor context setup */
- VCPU(v, vpsr) = IA64_PSR_BN;
- VCPU(v, dcr) = 0;
- } else {
- init_all_rr(v);
- regs->ar_rsc = vcpu_pl_adjust(regs->ar_rsc, 2);
- VCPU(v, banknum) = 1;
- VCPU(v, metaphysical_mode) = 1;
- VCPU(v, interrupt_mask_addr) =
- (unsigned char *)v->domain->arch.shared_info_va +
- INT_ENABLE_OFFSET(v);
- VCPU(v, itv) = (1 << 16); /* timer vector masked */
-
- v->vcpu_info->evtchn_upcall_pending = 0;
- v->vcpu_info->evtchn_upcall_mask = -1;
- }
-
- /* pta.size must not be 0. The minimum is 15 (32k) */
- VCPU(v, pta) = 15 << 2;
-
- v->arch.domain_itm_last = -1L;
-}
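
On the `VCPU(v, pta) = 15 << 2` above: the pta.size field sits at bit 2 (IA64_PTA_SZ_BIT), so this programs the architectural minimum VHPT size of 2^15 bytes = 32 KB. A sketch of the encoding and its decoding, reusing the macros defined earlier in this file:

    #include <stdio.h>

    #define IA64_PTA_SZ_BIT 2
    #define IA64_PTA_SZ(x)  ((unsigned long)(x) << IA64_PTA_SZ_BIT)

    int main(void)
    {
        unsigned long pta = IA64_PTA_SZ(15);    /* size field = 15 */
        /* pta.size is a 6-bit field starting at bit 2 */
        unsigned long vhpt_bytes = 1ul << ((pta >> IA64_PTA_SZ_BIT) & 0x3f);
        printf("pta=0x%lx -> VHPT size %lu bytes\n", pta, vhpt_bytes); /* 32768 */
        return 0;
    }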
-
-/**************************************************************************
- VCPU privileged application register access routines
-**************************************************************************/
-
-void vcpu_load_kernel_regs(VCPU * vcpu)
-{
- ia64_set_kr(0, VCPU(vcpu, krs[0]));
- ia64_set_kr(1, VCPU(vcpu, krs[1]));
- ia64_set_kr(2, VCPU(vcpu, krs[2]));
- ia64_set_kr(3, VCPU(vcpu, krs[3]));
- ia64_set_kr(4, VCPU(vcpu, krs[4]));
- ia64_set_kr(5, VCPU(vcpu, krs[5]));
- ia64_set_kr(6, VCPU(vcpu, krs[6]));
- ia64_set_kr(7, VCPU(vcpu, krs[7]));
-}
-
-/* GCC 4.0.2 seems not to be able to suppress this call! */
-#define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
-
-IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val)
-{
- if (reg == 44)
- return vcpu_set_itc(vcpu, val);
- else if (reg == 27)
- return IA64_ILLOP_FAULT;
- else if (reg == 24)
- printk("warning: setting ar.eflg is a no-op; no IA-32 "
- "support\n");
- else if (reg > 7)
- return IA64_ILLOP_FAULT;
- else {
- PSCB(vcpu, krs[reg]) = val;
- ia64_set_kr(reg, val);
- }
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val)
-{
- if (reg == 24)
- printk("warning: getting ar.eflg is a no-op; no IA-32 "
- "support\n");
- else if (reg > 7)
- return IA64_ILLOP_FAULT;
- else
- *val = PSCB(vcpu, krs[reg]);
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU protection key emulation for PV
- This first implementation reserves 1 pkr for the hypervisor key.
- On setting psr.pk the hypervisor key is loaded into pkr[15], so the
- hypervisor may run with psr.pk==1. The key for the hypervisor is 0.
- Furthermore the VCPU is flagged to use the protection keys.
- Currently the domU has to keep track of the keys in use, because setting
- a pkr does not check the other pkrs for whether the key is already in
- use.
-**************************************************************************/
-
-/* Loads the protection key registers from struct arch_vcpu into the
- * processor's pkrs. Called from context_switch().
- * TODO: take care of the order of writing pkrs!
- */
-void vcpu_pkr_load_regs(VCPU * vcpu)
-{
- int i;
-
- for (i = 0; i <= XEN_IA64_NPKRS; i++)
- ia64_set_pkr(i, PSCBX(vcpu, pkrs[i]));
-}
-
-/* The function activates the pkr handling. */
-static void vcpu_pkr_set_psr_handling(VCPU * vcpu)
-{
- if (PSCBX(vcpu, pkr_flags) & XEN_IA64_PKR_IN_USE)
- return;
-
- vcpu_pkr_use_set(vcpu);
- PSCBX(vcpu, pkrs[XEN_IA64_NPKRS]) = XEN_IA64_PKR_VAL;
-
- /* Write the special key for the hypervisor into pkr[15]. */
- ia64_set_pkr(XEN_IA64_NPKRS, XEN_IA64_PKR_VAL);
-}
-
-/**************************************************************************
- VCPU processor status register access routines
-**************************************************************************/
-
-static void vcpu_set_metaphysical_mode(VCPU * vcpu, BOOLEAN newmode)
-{
- /* only do something if mode changes */
- if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
- PSCB(vcpu, metaphysical_mode) = newmode;
- if (newmode)
- set_metaphysical_rr0();
- else
- set_virtual_rr0();
- }
-}
-
-IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
-{
- vcpu_set_metaphysical_mode(vcpu, TRUE);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
-{
- struct ia64_psr imm, *ipsr;
- REGS *regs = vcpu_regs(vcpu);
-
- //PRIVOP_COUNT_ADDR(regs,_RSM);
- // TODO: All of these bits need to be virtualized
- // TODO: Only allowed for current vcpu
- ipsr = (struct ia64_psr *)&regs->cr_ipsr;
- imm = *(struct ia64_psr *)&imm24;
- // interrupt flag
- if (imm.i)
- vcpu->vcpu_info->evtchn_upcall_mask = 1;
- if (imm.ic)
- PSCB(vcpu, interrupt_collection_enabled) = 0;
- // interrupt collection flag
- //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
- // just handle psr.up and psr.pp for now
- if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
- IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
- IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_PK))
- return IA64_ILLOP_FAULT;
- if (imm.dfh) {
- ipsr->dfh = PSCB(vcpu, hpsr_dfh);
- PSCB(vcpu, vpsr_dfh) = 0;
- }
- if (imm.dfl)
- ipsr->dfl = 0;
- if (imm.pp) {
- // xenoprof:
- // Don't change psr.pp and ipsr->pp
- // They are manipulated by xenoprof
- // psr.pp = 1;
- // ipsr->pp = 1;
- PSCB(vcpu, vpsr_pp) = 0; // but fool the domain if it gets psr
- }
- if (imm.up)
- ipsr->up = 0;
- if (imm.sp)
- ipsr->sp = 0;
- if (imm.be)
- ipsr->be = 0;
- if (imm.dt)
- vcpu_set_metaphysical_mode(vcpu, TRUE);
- if (imm.pk) {
- ipsr->pk = 0;
- vcpu_pkr_use_unset(vcpu);
- }
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_psr_dt(VCPU * vcpu)
-{
- vcpu_set_metaphysical_mode(vcpu, FALSE);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
-{
- vcpu->vcpu_info->evtchn_upcall_mask = 0;
- PSCB(vcpu, interrupt_collection_enabled) = 1;
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
-{
- struct ia64_psr imm, *ipsr;
- REGS *regs = vcpu_regs(vcpu);
- u64 mask, enabling_interrupts = 0;
-
- //PRIVOP_COUNT_ADDR(regs,_SSM);
- // TODO: All of these bits need to be virtualized
- imm = *(struct ia64_psr *)&imm24;
- ipsr = (struct ia64_psr *)&regs->cr_ipsr;
- // just handle psr.sp,pp and psr.i,ic (and user mask) for now
- mask =
- IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
- IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_BE |
- IA64_PSR_PK;
- if (imm24 & ~mask)
- return IA64_ILLOP_FAULT;
- if (imm.dfh) {
- PSCB(vcpu, vpsr_dfh) = 1;
- ipsr->dfh = 1;
- }
- if (imm.dfl)
- ipsr->dfl = 1;
- if (imm.pp) {
- // xenoprof:
- // Don't change psr.pp and ipsr->pp
- // They are manipulated by xenoprof
- // psr.pp = 1;
- // ipsr->pp = 1;
- PSCB(vcpu, vpsr_pp) = 1;
- }
- if (imm.sp)
- ipsr->sp = 1;
- if (imm.i) {
- if (vcpu->vcpu_info->evtchn_upcall_mask) {
-//printk("vcpu_set_psr_sm: psr.ic 0->1\n");
- enabling_interrupts = 1;
- }
- vcpu->vcpu_info->evtchn_upcall_mask = 0;
- }
- if (imm.ic)
- PSCB(vcpu, interrupt_collection_enabled) = 1;
- // TODO: do this faster
- if (imm.mfl)
- ipsr->mfl = 1;
- if (imm.mfh)
- ipsr->mfh = 1;
- if (imm.ac)
- ipsr->ac = 1;
- if (imm.up)
- ipsr->up = 1;
- if (imm.be)
- ipsr->be = 1;
- if (imm.dt)
- vcpu_set_metaphysical_mode(vcpu, FALSE);
- if (imm.pk) {
- vcpu_pkr_set_psr_handling(vcpu);
- ipsr->pk = 1;
- }
- if (enabling_interrupts &&
- vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
- PSCB(vcpu, pending_interruption) = 1;
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
-{
- struct ia64_psr newpsr, *ipsr;
- REGS *regs = vcpu_regs(vcpu);
- u64 enabling_interrupts = 0;
-
- newpsr = *(struct ia64_psr *)&val;
- ipsr = (struct ia64_psr *)&regs->cr_ipsr;
-
- ipsr->be = newpsr.be;
- ipsr->up = newpsr.up;
- ipsr->ac = newpsr.ac;
- ipsr->mfl = newpsr.mfl;
- ipsr->mfh = newpsr.mfh;
-
- PSCB(vcpu, interrupt_collection_enabled) = newpsr.ic;
-
- if (newpsr.i && vcpu->vcpu_info->evtchn_upcall_mask)
- enabling_interrupts = 1;
-
- vcpu->vcpu_info->evtchn_upcall_mask = !(newpsr.i);
-
- if (newpsr.pk) {
- vcpu_pkr_set_psr_handling(vcpu);
- ipsr->pk = 1;
- } else
- vcpu_pkr_use_unset(vcpu);
-
- vcpu_set_metaphysical_mode(vcpu, !(newpsr.dt && newpsr.rt));
-
- ipsr->dfl = newpsr.dfl;
- PSCB(vcpu, vpsr_dfh) = newpsr.dfh;
- ipsr->dfh = newpsr.dfh ? 1 : PSCB(vcpu, hpsr_dfh);
-
- ipsr->sp = newpsr.sp;
-
- /* xenoprof: Don't change ipsr->pp, it is manipulated by xenoprof */
- PSCB(vcpu, vpsr_pp) = newpsr.pp;
-
- if (enabling_interrupts &&
- vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
- PSCB(vcpu, pending_interruption) = 1;
-
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_psr(VCPU * vcpu, u64 val)
-{
- IA64_PSR newpsr, vpsr;
- REGS *regs = vcpu_regs(vcpu);
- u64 enabling_interrupts = 0;
-
- /* Copy non-virtualized bits. */
- newpsr.val = val & IA64_PSR_NON_VIRT_BITS;
-
- /* Bits forced to 1 (psr.si, psr.is and psr.mc are forced to 0) */
- newpsr.val |= IA64_PSR_DI;
-
- newpsr.val |= IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT |
- IA64_PSR_IT | IA64_PSR_BN | IA64_PSR_DI;
- /*
- * xenoprof:
- * keep psr.pp unchanged for xenoprof.
- */
- if (regs->cr_ipsr & IA64_PSR_PP)
- newpsr.val |= IA64_PSR_PP;
- else
- newpsr.val &= ~IA64_PSR_PP;
-
- vpsr.val = val;
-
- if (val & IA64_PSR_DFH) {
- newpsr.dfh = 1;
- PSCB(vcpu, vpsr_dfh) = 1;
- } else {
- newpsr.dfh = PSCB(vcpu, hpsr_dfh);
- PSCB(vcpu, vpsr_dfh) = 0;
- }
-
- PSCB(vcpu, vpsr_pp) = vpsr.pp;
-
- if (vpsr.i) {
- if (vcpu->vcpu_info->evtchn_upcall_mask)
- enabling_interrupts = 1;
-
- vcpu->vcpu_info->evtchn_upcall_mask = 0;
-
- if (enabling_interrupts &&
- vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
- PSCB(vcpu, pending_interruption) = 1;
- } else
- vcpu->vcpu_info->evtchn_upcall_mask = 1;
-
- PSCB(vcpu, interrupt_collection_enabled) = vpsr.ic;
- vcpu_set_metaphysical_mode(vcpu, !(vpsr.dt && vpsr.rt && vpsr.it));
-
- newpsr.cpl |= max_t(u64, vpsr.cpl, CONFIG_CPL0_EMUL);
-
- if (PSCB(vcpu, banknum) != vpsr.bn) {
- if (vpsr.bn)
- vcpu_bsw1(vcpu);
- else
- vcpu_bsw0(vcpu);
- }
- if (vpsr.pk) {
- vcpu_pkr_set_psr_handling(vcpu);
- newpsr.pk = 1;
- } else
- vcpu_pkr_use_unset(vcpu);
-
- regs->cr_ipsr = newpsr.val;
-
- return IA64_NO_FAULT;
-}
-
-u64 vcpu_get_psr(VCPU * vcpu)
-{
- REGS *regs = vcpu_regs(vcpu);
- PSR newpsr;
- PSR ipsr;
-
- ipsr.i64 = regs->cr_ipsr;
-
- /* Copy non-virtualized bits. */
- newpsr.i64 = ipsr.i64 & IA64_PSR_NON_VIRT_BITS;
-
- /* Bits forced to 1 (psr.si and psr.is are forced to 0) */
- newpsr.i64 |= IA64_PSR_DI;
-
- /* System mask. */
- newpsr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
- newpsr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
-
- if (!PSCB(vcpu, metaphysical_mode))
- newpsr.i64 |= IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT;
-
- newpsr.ia64_psr.dfh = PSCB(vcpu, vpsr_dfh);
- newpsr.ia64_psr.pp = PSCB(vcpu, vpsr_pp);
-
- /* Fool cpl. */
- if (ipsr.ia64_psr.cpl <= CONFIG_CPL0_EMUL)
- newpsr.ia64_psr.cpl = 0;
- else
- newpsr.ia64_psr.cpl = ipsr.ia64_psr.cpl;
-
- newpsr.ia64_psr.bn = PSCB(vcpu, banknum);
-
- return newpsr.i64;
-}
-
-IA64FAULT vcpu_get_psr_masked(VCPU * vcpu, u64 * pval)
-{
- u64 psr = vcpu_get_psr(vcpu);
- *pval = psr & (MASK(0, 32) | MASK(35, 2));
- return IA64_NO_FAULT;
-}
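
MASK(first, count) here denotes `count` one-bits starting at bit `first`, so the masked PSR keeps bits 0-31 plus bits 35-36, the portion architecturally visible to `mov rx=psr`. A sketch of the macro's semantics; MASK itself is defined elsewhere in the tree, and this stand-in definition assumes first + count < 64:

    #include <stdio.h>

    /* A mask of `count` ones starting at bit `first` (valid while
     * first + count < 64); matches how MASK() is used above. */
    #define MASK(first, count) ((((unsigned long)1 << (count)) - 1) << (first))

    int main(void)
    {
        unsigned long m = MASK(0, 32) | MASK(35, 2);
        printf("mask = 0x%016lx\n", m);   /* 0x00000018ffffffff */
        return 0;
    }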
-
-BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
-{
- return !!PSCB(vcpu, interrupt_collection_enabled);
-}
-
-BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
-{
- return !vcpu->vcpu_info->evtchn_upcall_mask;
-}
-
-
-/**************************************************************************
- VCPU interrupt control register access routines
-**************************************************************************/
-
-void vcpu_pend_unspecified_interrupt(VCPU * vcpu)
-{
- PSCB(vcpu, pending_interruption) = 1;
-}
-
-void vcpu_pend_interrupt(VCPU * vcpu, u64 vector)
-{
- if (vector & ~0xff) {
- printk("vcpu_pend_interrupt: bad vector\n");
- return;
- }
-
- if (vcpu->arch.event_callback_ip) {
- printk("Deprecated interface. Move to new event based "
- "solution\n");
- return;
- }
-
- if (VMX_DOMAIN(vcpu)) {
- set_bit(vector, VCPU(vcpu, irr));
- } else {
- set_bit(vector, PSCBX(vcpu, irr));
- PSCB(vcpu, pending_interruption) = 1;
- }
-}
-
-#define IA64_TPR_MMI 0x10000
-#define IA64_TPR_MIC 0x000f0
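-/* As used below: tpr.mmi (bit 16) masks all external interrupts, while
- * tpr.mic (bits 4-7) masks every vector whose priority class (vector/16)
- * is at or below mic, hence the "(tpr & MIC) + 15 >= vector" test. */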
-
-/* checks to see if a VCPU has any unmasked pending interrupts
- * if so, returns the highest, else returns SPURIOUS_VECTOR */
-/* NOTE: Since this gets called from vcpu_get_ivr() and the
- * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
- * this routine also ignores pscb.interrupt_delivery_enabled
- * and this must be checked independently; see vcpu_deliverable_interrupts() */
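-/* Layout note: irr[] and insvc[] are four 64-bit words covering vectors
- * 0-255, so scanning from word 3 (vectors 192-255) downward yields the
- * highest-priority pending vector first. */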
-u64 vcpu_check_pending_interrupts(VCPU * vcpu)
-{
- u64 *p, *r, bits, bitnum, mask, i, vector;
-
- if (vcpu->arch.event_callback_ip)
- return SPURIOUS_VECTOR;
-
-	/* Always check for a pending event, since the guest may just
-	 * ack the event injection without handling it, and may later
-	 * discard the event itself.
-	 */
- check_start:
- if (event_pending(vcpu) &&
- !test_bit(vcpu->domain->shared_info->arch.evtchn_vector,
- &PSCBX(vcpu, insvc[0])))
- vcpu_pend_interrupt(vcpu,
- vcpu->domain->shared_info->arch.
- evtchn_vector);
-
- p = &PSCBX(vcpu, irr[3]);
- r = &PSCBX(vcpu, insvc[3]);
- for (i = 3 ;; p--, r--, i--) {
- bits = *p;
- if (bits)
- break; // got a potential interrupt
- if (*r) {
- // nothing in this word which is pending+inservice
- // but there is one inservice which masks lower
- return SPURIOUS_VECTOR;
- }
- if (i == 0) {
- // checked all bits... nothing pending+inservice
- return SPURIOUS_VECTOR;
- }
- }
-	// have a pending, deliverable interrupt... see if it is masked
- bitnum = ia64_fls(bits);
-//printk("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...\n",bitnum);
- vector = bitnum + (i * 64);
- mask = 1L << bitnum;
- /* sanity check for guest timer interrupt */
- if (vector == (PSCB(vcpu, itv) & 0xff)) {
- uint64_t now = ia64_get_itc();
- if (now < PSCBX(vcpu, domain_itm)) {
-// printk("Ooops, pending guest timer before its due\n");
- PSCBX(vcpu, irr[i]) &= ~mask;
- goto check_start;
- }
- }
-//printk("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...\n",vector);
- if (*r >= mask) {
- // masked by equal inservice
-//printk("but masked by equal inservice\n");
- return SPURIOUS_VECTOR;
- }
- if (PSCB(vcpu, tpr) & IA64_TPR_MMI) {
- // tpr.mmi is set
-//printk("but masked by tpr.mmi\n");
- return SPURIOUS_VECTOR;
- }
- if (((PSCB(vcpu, tpr) & IA64_TPR_MIC) + 15) >= vector) {
- //tpr.mic masks class
-//printk("but masked by tpr.mic\n");
- return SPURIOUS_VECTOR;
- }
-//printk("returned to caller\n");
- return vector;
-}
-
-u64 vcpu_deliverable_interrupts(VCPU * vcpu)
-{
- return (vcpu_get_psr_i(vcpu) &&
- vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
-}
-
-u64 vcpu_deliverable_timer(VCPU * vcpu)
-{
- return (vcpu_get_psr_i(vcpu) &&
- vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu, itv));
-}
-
-IA64FAULT vcpu_get_lid(VCPU * vcpu, u64 * pval)
-{
- /* Use EID=0, ID=vcpu_id. */
- *pval = vcpu->vcpu_id << 24;
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_ivr(VCPU * vcpu, u64 * pval)
-{
- int i;
- u64 vector, mask;
-
-#define HEARTBEAT_FREQ 16 // period in seconds
-#ifdef HEARTBEAT_FREQ
-#define N_DOMS 16		// number of domains tracked
-#if 0
- static long count[N_DOMS] = { 0 };
-#endif
- static long nonclockcount[N_DOMS] = { 0 };
- unsigned domid = vcpu->domain->domain_id;
-#endif
-#ifdef IRQ_DEBUG
- static char firstivr = 1;
- static char firsttime[256];
- if (firstivr) {
- int i;
- for (i = 0; i < 256; i++)
- firsttime[i] = 1;
- firstivr = 0;
- }
-#endif
-
- vector = vcpu_check_pending_interrupts(vcpu);
- if (vector == SPURIOUS_VECTOR) {
- PSCB(vcpu, pending_interruption) = 0;
- *pval = vector;
- return IA64_NO_FAULT;
- }
-#ifdef HEARTBEAT_FREQ
- if (domid >= N_DOMS)
- domid = N_DOMS - 1;
-#if 0
- if (vector == (PSCB(vcpu, itv) & 0xff)) {
- if (!(++count[domid] & ((HEARTBEAT_FREQ * 1024) - 1))) {
- printk("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
- domid, count[domid], nonclockcount[domid]);
- //count[domid] = 0;
- //dump_runq();
- }
- }
-#endif
-	if (vector != (PSCB(vcpu, itv) & 0xff))	/* non-timer interrupt */
-		nonclockcount[domid]++;
-#endif
- // now have an unmasked, pending, deliverable vector!
- // getting ivr has "side effects"
-#ifdef IRQ_DEBUG
- if (firsttime[vector]) {
- printk("*** First get_ivr on vector=%lu,itc=%lx\n",
- vector, ia64_get_itc());
- firsttime[vector] = 0;
- }
-#endif
- /* if delivering a timer interrupt, remember domain_itm, which
- * needs to be done before clearing irr
- */
- if (vector == (PSCB(vcpu, itv) & 0xff)) {
- PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
- }
-
- i = vector >> 6;
- mask = 1L << (vector & 0x3f);
-//printk("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
- PSCBX(vcpu, insvc[i]) |= mask;
- PSCBX(vcpu, irr[i]) &= ~mask;
- //PSCB(vcpu,pending_interruption)--;
- *pval = vector;
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_tpr(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCB(vcpu, tpr);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_eoi(VCPU * vcpu, u64 * pval)
-{
- *pval = 0L; // reads of eoi always return 0
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_irr0(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCBX(vcpu, irr[0]);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_irr1(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCBX(vcpu, irr[1]);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_irr2(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCBX(vcpu, irr[2]);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_irr3(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCBX(vcpu, irr[3]);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_itv(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCB(vcpu, itv);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_pmv(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCB(vcpu, pmv);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCB(vcpu, cmcv);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
-{
- // fix this when setting values other than m-bit is supported
- gdprintk(XENLOG_DEBUG,
- "vcpu_get_lrr0: Unmasked interrupts unsupported\n");
- *pval = (1L << 16);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
-{
- // fix this when setting values other than m-bit is supported
- gdprintk(XENLOG_DEBUG,
- "vcpu_get_lrr1: Unmasked interrupts unsupported\n");
- *pval = (1L << 16);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_lid(VCPU * vcpu, u64 val)
-{
- printk("vcpu_set_lid: Setting cr.lid is unsupported\n");
- return IA64_ILLOP_FAULT;
-}
-
-IA64FAULT vcpu_set_tpr(VCPU * vcpu, u64 val)
-{
- if (val & 0xff00)
- return IA64_RSVDREG_FAULT;
- PSCB(vcpu, tpr) = val;
- /* This can unmask interrupts. */
- if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
- PSCB(vcpu, pending_interruption) = 1;
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_eoi(VCPU * vcpu, u64 val)
-{
- u64 *p, bits, vec, bitnum;
- int i;
-
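-	/* Find the highest in-service vector by scanning the insvc words
-	 * from the top, mirroring vcpu_check_pending_interrupts(). */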
- p = &PSCBX(vcpu, insvc[3]);
- for (i = 3; (i >= 0) && !(bits = *p); i--, p--)
- ;
- if (i < 0) {
- printk("Trying to EOI interrupt when none are in-service.\n");
- return IA64_NO_FAULT;
- }
- bitnum = ia64_fls(bits);
- vec = bitnum + (i * 64);
- /* clear the correct bit */
- bits &= ~(1L << bitnum);
- *p = bits;
- /* clearing an eoi bit may unmask another pending interrupt... */
- if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
- // worry about this later... Linux only calls eoi
- // with interrupts disabled
- printk("Trying to EOI interrupt with interrupts enabled\n");
- }
- if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
- PSCB(vcpu, pending_interruption) = 1;
-//printk("YYYYY vcpu_set_eoi: Successful\n");
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_lrr0(VCPU * vcpu, u64 val)
-{
- if (!(val & (1L << 16))) {
- printk("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
- return IA64_ILLOP_FAULT;
- }
- // no place to save this state but nothing to do anyway
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_lrr1(VCPU * vcpu, u64 val)
-{
- if (!(val & (1L << 16))) {
-		printk("vcpu_set_lrr1: Unmasked interrupts unsupported\n");
- return IA64_ILLOP_FAULT;
- }
- // no place to save this state but nothing to do anyway
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_itv(VCPU * vcpu, u64 val)
-{
- /* Check reserved fields. */
- if (val & 0xef00)
- return IA64_ILLOP_FAULT;
- PSCB(vcpu, itv) = val;
- if (val & 0x10000) {
- /* Disable itm. */
- PSCBX(vcpu, domain_itm) = 0;
- } else
- vcpu_set_next_timer(vcpu);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_pmv(VCPU * vcpu, u64 val)
-{
- if (val & 0xef00) /* reserved fields */
- return IA64_RSVDREG_FAULT;
- PSCB(vcpu, pmv) = val;
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_cmcv(VCPU * vcpu, u64 val)
-{
- if (val & 0xef00) /* reserved fields */
- return IA64_RSVDREG_FAULT;
- PSCB(vcpu, cmcv) = val;
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU temporary register access routines
-**************************************************************************/
-u64 vcpu_get_tmp(VCPU * vcpu, u64 index)
-{
- if (index > 7)
- return 0;
- return PSCB(vcpu, tmp[index]);
-}
-
-void vcpu_set_tmp(VCPU * vcpu, u64 index, u64 val)
-{
- if (index <= 7)
- PSCB(vcpu, tmp[index]) = val;
-}
-
-/**************************************************************************
-Interval timer routines
-**************************************************************************/
-
-BOOLEAN vcpu_timer_disabled(VCPU * vcpu)
-{
- u64 itv = PSCB(vcpu, itv);
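-	/* itv bit 16 is the mask (m) bit; itv == 0 is also treated as
-	 * disabled here. */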
- return (!itv || !!(itv & 0x10000));
-}
-
-BOOLEAN vcpu_timer_inservice(VCPU * vcpu)
-{
-	u64 itv = PSCB(vcpu, itv) & 0xff;	/* vector field only */
-	return test_bit(itv, PSCBX(vcpu, insvc));
-}
-
-BOOLEAN vcpu_timer_expired(VCPU * vcpu)
-{
- unsigned long domain_itm = PSCBX(vcpu, domain_itm);
- unsigned long now = ia64_get_itc();
-
- if (!domain_itm)
- return FALSE;
- if (now < domain_itm)
- return FALSE;
- if (vcpu_timer_disabled(vcpu))
- return FALSE;
- return TRUE;
-}
-
-void vcpu_safe_set_itm(unsigned long val)
-{
- unsigned long epsilon = 100;
- unsigned long flags;
- u64 now = ia64_get_itc();
-
- local_irq_save(flags);
- while (1) {
-//printk("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
- ia64_set_itm(val);
- if (val > (now = ia64_get_itc()))
- break;
- val = now + epsilon;
- epsilon <<= 1;
- }
- local_irq_restore(flags);
-}
-
-void vcpu_set_next_timer(VCPU * vcpu)
-{
- u64 d = PSCBX(vcpu, domain_itm);
- //u64 s = PSCBX(vcpu,xen_itm);
- u64 s = local_cpu_data->itm_next;
- u64 now = ia64_get_itc();
-
- /* gloss over the wraparound problem for now... we know it exists
- * but it doesn't matter right now */
-
- if (is_idle_domain(vcpu->domain)) {
-// printk("****** vcpu_set_next_timer called during idle!!\n");
- vcpu_safe_set_itm(s);
- return;
- }
- //s = PSCBX(vcpu,xen_itm);
- if (d && (d > now) && (d < s)) {
- vcpu_safe_set_itm(d);
- //using_domain_as_itm++;
- } else {
- vcpu_safe_set_itm(s);
- //using_xen_as_itm++;
- }
-}
-
-IA64FAULT vcpu_set_itm(VCPU * vcpu, u64 val)
-{
- //UINT now = ia64_get_itc();
-
- //if (val < now) val = now + 1000;
-//printk("*** vcpu_set_itm: called with %lx\n",val);
- PSCBX(vcpu, domain_itm) = val;
- vcpu_set_next_timer(vcpu);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_itc(VCPU * vcpu, u64 val)
-{
-#define DISALLOW_SETTING_ITC_FOR_NOW
-#ifdef DISALLOW_SETTING_ITC_FOR_NOW
- static int did_print;
- if (!did_print) {
- printk("vcpu_set_itc: Setting ar.itc is currently disabled "
- "(this message is only displayed once)\n");
- did_print = 1;
- }
-#else
- u64 oldnow = ia64_get_itc();
- u64 olditm = PSCBX(vcpu, domain_itm);
- unsigned long d = olditm - oldnow;
- unsigned long x = local_cpu_data->itm_next - oldnow;
-
- u64 newnow = val, min_delta;
-
- local_irq_disable();
- if (olditm) {
- printk("**** vcpu_set_itc(%lx): vitm changed to %lx\n", val,
- newnow + d);
- PSCBX(vcpu, domain_itm) = newnow + d;
- }
- local_cpu_data->itm_next = newnow + x;
- d = PSCBX(vcpu, domain_itm);
- x = local_cpu_data->itm_next;
-
- ia64_set_itc(newnow);
- if (d && (d > newnow) && (d < x)) {
- vcpu_safe_set_itm(d);
- //using_domain_as_itm++;
- } else {
- vcpu_safe_set_itm(x);
- //using_xen_as_itm++;
- }
- local_irq_enable();
-#endif
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_itm(VCPU * vcpu, u64 * pval)
-{
- //FIXME: Implement this
- printk("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
- return IA64_NO_FAULT;
- //return IA64_ILLOP_FAULT;
-}
-
-IA64FAULT vcpu_get_itc(VCPU * vcpu, u64 * pval)
-{
- //TODO: Implement this
- printk("vcpu_get_itc: Getting ar.itc is unsupported\n");
- return IA64_ILLOP_FAULT;
-}
-
-void vcpu_pend_timer(VCPU * vcpu)
-{
- u64 itv = PSCB(vcpu, itv) & 0xff;
-
- if (vcpu_timer_disabled(vcpu))
- return;
- //if (vcpu_timer_inservice(vcpu)) return;
- if (PSCBX(vcpu, domain_itm_last) == PSCBX(vcpu, domain_itm)) {
- // already delivered an interrupt for this so
- // don't deliver another
- return;
- }
- if (vcpu->arch.event_callback_ip) {
-		/* A small window may exist where a vIRQ is injected before
-		 * the related handler has been registered. Don't fire in
-		 * that case.
-		 */
- if (vcpu->virq_to_evtchn[VIRQ_ITC]) {
- send_guest_vcpu_virq(vcpu, VIRQ_ITC);
- PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
- }
- } else
- vcpu_pend_interrupt(vcpu, itv);
-}
-
-// returns true if a timer interrupt is deliverable before it is actually due
-u64 vcpu_timer_pending_early(VCPU * vcpu)
-{
- u64 now = ia64_get_itc();
- u64 itm = PSCBX(vcpu, domain_itm);
-
- if (vcpu_timer_disabled(vcpu))
- return 0;
- if (!itm)
- return 0;
- return (vcpu_deliverable_timer(vcpu) && (now < itm));
-}
-
-/**************************************************************************
-Privileged operation emulation routines
-**************************************************************************/
-
-static void vcpu_force_tlb_miss(VCPU * vcpu, u64 ifa)
-{
- PSCB(vcpu, ifa) = ifa;
- PSCB(vcpu, itir) = vcpu_get_itir_on_fault(vcpu, ifa);
- vcpu_thash(current, ifa, &PSCB(current, iha));
-}
-
-IA64FAULT vcpu_force_inst_miss(VCPU * vcpu, u64 ifa)
-{
- vcpu_force_tlb_miss(vcpu, ifa);
- return vcpu_get_rr_ve(vcpu, ifa) ? IA64_INST_TLB_VECTOR :
- IA64_ALT_INST_TLB_VECTOR;
-}
-
-IA64FAULT vcpu_force_data_miss(VCPU * vcpu, u64 ifa)
-{
- vcpu_force_tlb_miss(vcpu, ifa);
- return vcpu_get_rr_ve(vcpu, ifa) ? IA64_DATA_TLB_VECTOR :
- IA64_ALT_DATA_TLB_VECTOR;
-}
-
-IA64FAULT vcpu_rfi(VCPU * vcpu)
-{
- u64 ifs;
- REGS *regs = vcpu_regs(vcpu);
-
- vcpu_set_psr(vcpu, PSCB(vcpu, ipsr));
-
- ifs = PSCB(vcpu, ifs);
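-	/* cr.ifs is restored only when its valid bit (bit 63) is set. */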
- if (ifs & 0x8000000000000000UL)
- regs->cr_ifs = ifs;
-
- regs->cr_iip = PSCB(vcpu, iip);
-
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_cover(VCPU * vcpu)
-{
- // TODO: Only allowed for current vcpu
- REGS *regs = vcpu_regs(vcpu);
-
- if (!PSCB(vcpu, interrupt_collection_enabled)) {
- PSCB(vcpu, ifs) = regs->cr_ifs;
- }
- regs->cr_ifs = 0;
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval)
-{
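-	/* Short-format VHPT hash: the entry address takes bits 63:61 from
-	 * vadr, bits [60:pta_sz] from pta.base, and its low pta_sz bits
-	 * from the hashed offset (vadr >> rr.ps) << 3.  The 15-bit split
-	 * below is just bit surgery to compose those three pieces. */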
- u64 pta = PSCB(vcpu, pta);
- u64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
- u64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT) - 1);
- u64 Mask = (1L << pta_sz) - 1;
- u64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
- u64 compMask_60_15 = ~Mask_60_15;
- u64 rr_ps = vcpu_get_rr_ps(vcpu, vadr);
- u64 VHPT_offset = (vadr >> rr_ps) << 3;
- u64 VHPT_addr1 = vadr & 0xe000000000000000L;
- u64 VHPT_addr2a =
- ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
- u64 VHPT_addr2b =
- ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
- u64 VHPT_addr3 = VHPT_offset & 0x7fff;
- u64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
- VHPT_addr3;
-
-//verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
- *pval = VHPT_addr;
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * padr)
-{
- printk("vcpu_ttag: ttag instruction unsupported\n");
- return IA64_ILLOP_FAULT;
-}
-
-int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
-
-/* Return TRUE iff [b1,e1] and [b2,e2] partially or fully overlap. */
-static inline int range_overlap(u64 b1, u64 e1, u64 b2, u64 e2)
-{
- return (b1 <= e2) && (e1 >= b2);
-}
-
-/* Crash the domain if [base, base + page_size] and the Xen virtual space
-   overlap.  Note: LSBs of base inside page_size are ignored. */
-static inline void
-check_xen_space_overlap(const char *func, u64 base, u64 page_size)
-{
- /* Overlaps can occur only in region 7.
- (This is an optimization to bypass all the checks). */
- if (REGION_NUMBER(base) != 7)
- return;
-
- /* Mask LSBs of base. */
- base &= ~(page_size - 1);
-
- /* FIXME: ideally an MCA should be generated... */
- if (range_overlap(HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
- base, base + page_size)
- || range_overlap(current->domain->arch.shared_info_va,
- current->domain->arch.shared_info_va
- + XSI_SIZE + XMAPPEDREGS_SIZE,
- base, base + page_size))
- panic_domain(NULL, "%s on Xen virtual space (%lx)\n",
- func, base);
-}
-
-// FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
-static inline int vcpu_match_tr_entry_no_p(TR_ENTRY * trp, u64 ifa,
- u64 rid)
-{
- return trp->rid == rid
- && ifa >= trp->vadr && ifa <= (trp->vadr + (1L << trp->ps) - 1);
-}
-
-static inline int vcpu_match_tr_entry(TR_ENTRY * trp, u64 ifa, u64 rid)
-{
- return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
-}
-
-static inline int
-vcpu_match_tr_entry_range(TR_ENTRY * trp, u64 rid, u64 b, u64 e)
-{
- return trp->rid == rid
- && trp->pte.p
- && range_overlap(b, e, trp->vadr, trp->vadr + (1L << trp->ps) - 1);
-
-}
-
-static TR_ENTRY *vcpu_tr_lookup(VCPU * vcpu, unsigned long va, u64 rid,
- BOOLEAN is_data)
-{
- unsigned char *regions;
- TR_ENTRY *trp;
- int tr_max;
- int i;
-
- if (is_data) {
- // data
- regions = &vcpu->arch.dtr_regions;
- trp = vcpu->arch.dtrs;
- tr_max = sizeof(vcpu->arch.dtrs) / sizeof(vcpu->arch.dtrs[0]);
- } else {
- // instruction
- regions = &vcpu->arch.itr_regions;
- trp = vcpu->arch.itrs;
- tr_max = sizeof(vcpu->arch.itrs) / sizeof(vcpu->arch.itrs[0]);
- }
-
- if (!vcpu_quick_region_check(*regions, va)) {
- return NULL;
- }
- for (i = 0; i < tr_max; i++, trp++) {
- if (vcpu_match_tr_entry(trp, va, rid)) {
- return trp;
- }
- }
- return NULL;
-}
-
-// return value
-// 0: failure
-// 1: success
-int
-vcpu_get_domain_bundle(VCPU * vcpu, REGS * regs, u64 gip,
- IA64_BUNDLE * bundle)
-{
-	u64 gpip;		// guest pseudo physical ip
- unsigned long vaddr;
- struct page_info *page;
-
- again:
-#if 0
-	// Currently xen doesn't track psr.it bits;
-	// it always assumes psr.it = 1.
- if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
- gpip = gip;
- } else
-#endif
- {
- unsigned long region = REGION_NUMBER(gip);
- unsigned long rr = PSCB(vcpu, rrs)[region];
- unsigned long rid = rr & RR_RID_MASK;
- BOOLEAN swap_rr0;
- TR_ENTRY *trp;
-
-		// vcpu->arch.{i,d}tlb are volatile;
-		// copy the value into the local variable tr before use.
- TR_ENTRY tr;
-
- // fast path:
- // try to access gip with guest virtual address directly.
- // This may cause tlb miss. see vcpu_translate(). Be careful!
- swap_rr0 = (!region && PSCB(vcpu, metaphysical_mode));
- if (swap_rr0) {
- set_virtual_rr0();
- }
- *bundle = __get_domain_bundle(gip);
- if (swap_rr0) {
- set_metaphysical_rr0();
- }
-
- if (!bundle->i64[0] && !bundle->i64[1]) {
- dprintk(XENLOG_INFO, "%s gip 0x%lx\n", __func__, gip);
- } else {
-			// OK, the mDTC access succeeded
- return 1;
- }
- // mDTC failed, so try vTLB.
-
- trp = vcpu_tr_lookup(vcpu, gip, rid, 0);
- if (trp != NULL) {
- tr = *trp;
- goto found;
- }
-		// When fetching the bundle fails, an itlb miss is reflected
-		// to the guest.  The last itc.i value is cached in
-		// PSCBX(vcpu, itlb).
- tr = PSCBX(vcpu, itlb);
- if (vcpu_match_tr_entry(&tr, gip, rid)) {
- //dprintk(XENLOG_WARNING,
- // "%s gip 0x%lx gpip 0x%lx\n", __func__,
- // gip, gpip);
- goto found;
- }
- trp = vcpu_tr_lookup(vcpu, gip, rid, 1);
- if (trp != NULL) {
- tr = *trp;
- goto found;
- }
- tr = PSCBX(vcpu, dtlb);
- if (vcpu_match_tr_entry(&tr, gip, rid)) {
- goto found;
- }
-
-		// Both mDTC and vTLB failed, so reflect the tlb miss
-		// into the guest.
- return 0;
-
- found:
- gpip = ((tr.pte.ppn >> (tr.ps - 12)) << tr.ps) |
- (gip & ((1 << tr.ps) - 1));
- }
-
- vaddr = (unsigned long)domain_mpa_to_imva(vcpu->domain, gpip);
- page = virt_to_page(vaddr);
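-	/* get_page() can fail if the page's reference state changed under
-	 * us; if this domain still owns the page, the translation may
-	 * simply have changed, so retranslate and retry. */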
- if (get_page(page, vcpu->domain) == 0) {
- if (page_get_owner(page) != vcpu->domain) {
- // This page might be a page granted by another
- // domain.
- panic_domain(regs, "domain tries to execute foreign "
- "domain page which might be mapped by "
- "grant table.\n");
- }
- goto again;
- }
- *bundle = *((IA64_BUNDLE *) vaddr);
- put_page(page);
- return 1;
-}
-
-IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
- u64 * pteval, u64 * itir, u64 * iha)
-{
- unsigned long region = REGION_NUMBER(address);
- unsigned long pta, rid, rr, key = 0;
- union pte_flags pte;
- TR_ENTRY *trp;
-
- if (PSCB(vcpu, metaphysical_mode) && !(!is_data && region)) {
- // dom0 may generate an uncacheable physical address (msb=1)
- if (region && ((region != 4) || (vcpu->domain != dom0))) {
-// FIXME: This seems to happen even though it shouldn't. Need to track
-// this down, but since it has been apparently harmless, just flag it for now
-// panic_domain(vcpu_regs(vcpu),
-
- /*
-			 * The guest may execute itc.d and rfi with psr.dt=0.
-			 * When the VMM then tries to fetch the opcode, a tlb
-			 * miss can occur with PSCB(vcpu,metaphysical_mode)=1
-			 * and region=5; the VMM must handle that miss as if
-			 * PSCB(vcpu,metaphysical_mode)=0.
- */
- printk("vcpu_translate: bad physical address: 0x%lx "
- "at %lx\n", address, vcpu_regs(vcpu)->cr_iip);
-
- } else {
- *pteval = (address & _PAGE_PPN_MASK) |
- __DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RWX;
- *itir = vcpu->arch.vhpt_pg_shift << 2;
- perfc_incr(phys_translate);
- return IA64_NO_FAULT;
- }
- } else if (!region && warn_region0_address) {
- REGS *regs = vcpu_regs(vcpu);
- unsigned long viip = PSCB(vcpu, iip);
- unsigned long vipsr = PSCB(vcpu, ipsr);
- unsigned long iip = regs->cr_iip;
- unsigned long ipsr = regs->cr_ipsr;
- printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, "
- "vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
- address, viip, vipsr, iip, ipsr);
- }
-
- rr = PSCB(vcpu, rrs)[region];
- rid = rr & RR_RID_MASK;
- if (is_data) {
- trp = vcpu_tr_lookup(vcpu, address, rid, 1);
- if (trp != NULL) {
- *pteval = trp->pte.val;
- *itir = trp->itir;
- perfc_incr(tr_translate);
- return IA64_NO_FAULT;
- }
- }
- // FIXME?: check itr's for data accesses too, else bad things happen?
- /* else */ {
- trp = vcpu_tr_lookup(vcpu, address, rid, 0);
- if (trp != NULL) {
- *pteval = trp->pte.val;
- *itir = trp->itir;
- perfc_incr(tr_translate);
- return IA64_NO_FAULT;
- }
- }
-
- /* check 1-entry TLB */
- // FIXME?: check dtlb for inst accesses too, else bad things happen?
- trp = &vcpu->arch.dtlb;
- pte = trp->pte;
- if ( /* is_data && */ pte.p
- && vcpu_match_tr_entry_no_p(trp, address, rid)) {
- *pteval = pte.val;
- *itir = trp->itir;
- perfc_incr(dtlb_translate);
- return IA64_USE_TLB;
- }
-
- /* check guest VHPT */
- pta = PSCB(vcpu, pta);
-
- *itir = rr & (RR_RID_MASK | RR_PS_MASK);
-	// note: architecturally, iha is optionally set for alt faults, but
-	// xenlinux depends on it, so it should be documented as part of the
-	// PV interface
- vcpu_thash(vcpu, address, iha);
- if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE)) {
- REGS *regs = vcpu_regs(vcpu);
- struct opt_feature* optf = &(vcpu->domain->arch.opt_feature);
-
- /* Optimization for identity mapped region 7 OS (linux) */
- if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG7_FLG &&
- region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL &&
- REGION_OFFSET(address) < _PAGE_PPN_MASK) {
- pte.val = address & _PAGE_PPN_MASK;
- pte.val = pte.val | optf->im_reg7.pgprot;
- key = optf->im_reg7.key;
- goto out;
- }
- return is_data ? IA64_ALT_DATA_TLB_VECTOR :
- IA64_ALT_INST_TLB_VECTOR;
- }
-
- if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
- /*
- * minimal support: vhpt walker is really dumb and won't find
- * anything
- */
- return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
- }
- /* avoid recursively walking (short format) VHPT */
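-	/* i.e. the miss address and pta fall in the same VHPT-sized,
-	 * naturally aligned region, so the walk would hit the VHPT itself. */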
- if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
- return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
-
- if (!__access_ok(*iha)
- || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
- // virtual VHPT walker "missed" in TLB
- return IA64_VHPT_FAULT;
-
- /*
- * Optimisation: this VHPT walker aborts on not-present pages
-	 * instead of inserting a not-present translation; this allows
- * vectoring directly to the miss handler.
- */
- if (!pte.p)
- return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
-
- /* found mapping in guest VHPT! */
-out:
- *itir = (rr & RR_PS_MASK) | (key << IA64_ITIR_KEY);
- *pteval = pte.val;
- perfc_incr(vhpt_translate);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr)
-{
- u64 pteval, itir, mask, iha;
- IA64FAULT fault;
-
- fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
- if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
- mask = itir_mask(itir);
- *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
- return IA64_NO_FAULT;
- }
- return vcpu_force_data_miss(vcpu, vadr);
-}
-
-IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key)
-{
- u64 pteval, itir, iha;
- IA64FAULT fault;
-
- fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
- if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
- *key = itir & IA64_ITIR_KEY_MASK;
- else
- *key = 1;
-
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU debug breakpoint register access routines
-**************************************************************************/
-
-IA64FAULT vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
-{
- if (reg >= IA64_NUM_DBG_REGS)
- return IA64_RSVDREG_FAULT;
- if ((reg & 1) == 0) {
- /* Validate address. */
- if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
- return IA64_ILLOP_FAULT;
- } else {
- if (!VMX_DOMAIN(vcpu)) {
- /* Mask PL0. */
- val &= ~(1UL << 56);
- }
- }
- if (val != 0)
- vcpu->arch.dbg_used |= (1 << reg);
- else
- vcpu->arch.dbg_used &= ~(1 << reg);
- vcpu->arch.dbr[reg] = val;
- if (vcpu == current)
- ia64_set_dbr(reg, val);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
-{
- if (reg >= IA64_NUM_DBG_REGS)
- return IA64_RSVDREG_FAULT;
- if ((reg & 1) == 0) {
- /* Validate address. */
- if (val >= HYPERVISOR_VIRT_START && val <= HYPERVISOR_VIRT_END)
- return IA64_ILLOP_FAULT;
- } else {
- if (!VMX_DOMAIN(vcpu)) {
- /* Mask PL0. */
- val &= ~(1UL << 56);
- }
- }
- if (val != 0)
- vcpu->arch.dbg_used |= (1 << (reg + IA64_NUM_DBG_REGS));
- else
- vcpu->arch.dbg_used &= ~(1 << (reg + IA64_NUM_DBG_REGS));
- vcpu->arch.ibr[reg] = val;
- if (vcpu == current)
- ia64_set_ibr(reg, val);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
-{
- if (reg >= IA64_NUM_DBG_REGS)
- return IA64_RSVDREG_FAULT;
- *pval = vcpu->arch.dbr[reg];
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
-{
- if (reg >= IA64_NUM_DBG_REGS)
- return IA64_RSVDREG_FAULT;
- *pval = vcpu->arch.ibr[reg];
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU performance monitor register access routines
-**************************************************************************/
-
-IA64FAULT vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
-{
- // TODO: Should set Logical CPU state, not just physical
- // NOTE: Writes to unimplemented PMC registers are discarded
-#ifdef DEBUG_PFMON
- printk("vcpu_set_pmc(%x,%lx)\n", reg, val);
-#endif
- ia64_set_pmc(reg, val);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
-{
- // TODO: Should set Logical CPU state, not just physical
- // NOTE: Writes to unimplemented PMD registers are discarded
-#ifdef DEBUG_PFMON
- printk("vcpu_set_pmd(%x,%lx)\n", reg, val);
-#endif
- ia64_set_pmd(reg, val);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
-{
- // NOTE: Reads from unimplemented PMC registers return zero
- u64 val = (u64) ia64_get_pmc(reg);
-#ifdef DEBUG_PFMON
- printk("%lx=vcpu_get_pmc(%x)\n", val, reg);
-#endif
- *pval = val;
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
-{
- // NOTE: Reads from unimplemented PMD registers return zero
- u64 val = (u64) ia64_get_pmd(reg);
-#ifdef DEBUG_PFMON
- printk("%lx=vcpu_get_pmd(%x)\n", val, reg);
-#endif
- *pval = val;
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU banked general register access routines
-**************************************************************************/
-#define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
-do{ \
- __asm__ __volatile__ ( \
- ";;extr.u %0 = %3,%6,16;;\n" \
- "dep %1 = %0, %1, 0, 16;;\n" \
- "st8 [%4] = %1\n" \
- "extr.u %0 = %2, 16, 16;;\n" \
- "dep %3 = %0, %3, %6, 16;;\n" \
- "st8 [%5] = %3\n" \
- ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
- "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
-}while(0)
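-/* The macro above swaps the 16 UNaT bits covering r16-r31 between
- * regs->eml_unat (at IA64_PT_REGS_R16_SLOT) and the saved per-bank NaT
- * words, so NaT state follows the bank switch; vcpu_bsw1_unat below is
- * its mirror image. */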
-
-IA64FAULT vcpu_bsw0(VCPU * vcpu)
-{
- // TODO: Only allowed for current vcpu
- REGS *regs = vcpu_regs(vcpu);
- unsigned long *r = &regs->r16;
- unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
- unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
- unsigned long *runat = &regs->eml_unat;
- unsigned long *b0unat = &PSCB(vcpu, vbnat);
- unsigned long *b1unat = &PSCB(vcpu, vnat);
-
- unsigned long i;
-
- if (VMX_DOMAIN(vcpu)) {
- if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
- for (i = 0; i < 16; i++) {
- *b1++ = *r;
- *r++ = *b0++;
- }
- vcpu_bsw0_unat(i, b0unat, b1unat, runat,
- IA64_PT_REGS_R16_SLOT);
- VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
- }
- } else {
- if (PSCB(vcpu, banknum)) {
- for (i = 0; i < 16; i++) {
- *b1++ = *r;
- *r++ = *b0++;
- }
- vcpu_bsw0_unat(i, b0unat, b1unat, runat,
- IA64_PT_REGS_R16_SLOT);
- PSCB(vcpu, banknum) = 0;
- }
- }
- return IA64_NO_FAULT;
-}
-
-#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, IA64_PT_REGS_R16_SLOT) \
-do { \
- __asm__ __volatile__ (";;extr.u %0 = %3,%6,16;;\n" \
- "dep %1 = %0, %1, 16, 16;;\n" \
- "st8 [%4] = %1\n" \
- "extr.u %0 = %2, 0, 16;;\n" \
- "dep %3 = %0, %3, %6, 16;;\n" \
- "st8 [%5] = %3\n" \
- ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
- "r"(*runat), "r"(b0unat), "r"(runat), \
- "i"(IA64_PT_REGS_R16_SLOT): "memory"); \
-} while(0)
-
-IA64FAULT vcpu_bsw1(VCPU * vcpu)
-{
- // TODO: Only allowed for current vcpu
- REGS *regs = vcpu_regs(vcpu);
- unsigned long *r = &regs->r16;
- unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
- unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
- unsigned long *runat = &regs->eml_unat;
- unsigned long *b0unat = &PSCB(vcpu, vbnat);
- unsigned long *b1unat = &PSCB(vcpu, vnat);
-
- unsigned long i;
-
- if (VMX_DOMAIN(vcpu)) {
- if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
- for (i = 0; i < 16; i++) {
- *b0++ = *r;
- *r++ = *b1++;
- }
- vcpu_bsw1_unat(i, b0unat, b1unat, runat,
- IA64_PT_REGS_R16_SLOT);
- VCPU(vcpu, vpsr) |= IA64_PSR_BN;
- }
- } else {
- if (!PSCB(vcpu, banknum)) {
- for (i = 0; i < 16; i++) {
- *b0++ = *r;
- *r++ = *b1++;
- }
- vcpu_bsw1_unat(i, b0unat, b1unat, runat,
- IA64_PT_REGS_R16_SLOT);
- PSCB(vcpu, banknum) = 1;
- }
- }
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU cpuid access routines
-**************************************************************************/
-
-IA64FAULT vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
-{
- // FIXME: This could get called as a result of a rsvd-reg fault
- // if reg > 3
- switch (reg) {
- case 0:
- memcpy(pval, "Xen/ia64", 8);
- break;
- case 1:
- *pval = 0;
- break;
- case 2:
- *pval = 0;
- break;
- case 3:
- *pval = ia64_get_cpuid(3);
- break;
- case 4:
- *pval = ia64_get_cpuid(4);
- break;
- default:
- if (reg > (ia64_get_cpuid(3) & 0xff))
- return IA64_RSVDREG_FAULT;
- *pval = ia64_get_cpuid(reg);
- break;
- }
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU region register access routines
-**************************************************************************/
-
-unsigned long vcpu_get_rr_ve(VCPU * vcpu, u64 vadr)
-{
- ia64_rr rr;
-
- rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
- return rr.ve;
-}
-
-IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val)
-{
- if (unlikely(is_reserved_rr_field(vcpu, val))) {
- gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
- return IA64_RSVDREG_FAULT;
- }
-
- PSCB(vcpu, rrs)[reg >> 61] = val;
- if (likely(vcpu == current)) {
- int rc = set_one_rr(reg, val);
- BUG_ON(rc == 0);
- }
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval)
-{
- if (VMX_DOMAIN(vcpu))
- *pval = VMX(vcpu, vrr[reg >> 61]);
- else
- *pval = PSCB(vcpu, rrs)[reg >> 61];
-
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcpu, u64 val0, u64 val1, u64 val2,
- u64 val3, u64 val4)
-{
- u64 reg0 = 0x0000000000000000UL;
- u64 reg1 = 0x2000000000000000UL;
- u64 reg2 = 0x4000000000000000UL;
- u64 reg3 = 0x6000000000000000UL;
- u64 reg4 = 0x8000000000000000UL;
-
- if (unlikely(is_reserved_rr_field(vcpu, val0) ||
- is_reserved_rr_field(vcpu, val1) ||
- is_reserved_rr_field(vcpu, val2) ||
- is_reserved_rr_field(vcpu, val3) ||
- is_reserved_rr_field(vcpu, val4))) {
- gdprintk(XENLOG_DEBUG,
- "use of invalid rrval %lx %lx %lx %lx %lx\n",
- val0, val1, val2, val3, val4);
- return IA64_RSVDREG_FAULT;
- }
-
- PSCB(vcpu, rrs)[reg0 >> 61] = val0;
- PSCB(vcpu, rrs)[reg1 >> 61] = val1;
- PSCB(vcpu, rrs)[reg2 >> 61] = val2;
- PSCB(vcpu, rrs)[reg3 >> 61] = val3;
- PSCB(vcpu, rrs)[reg4 >> 61] = val4;
- if (likely(vcpu == current)) {
- int rc;
- rc = !set_one_rr(reg0, val0);
- rc |= !set_one_rr(reg1, val1);
- rc |= !set_one_rr(reg2, val2);
- rc |= !set_one_rr(reg3, val3);
- rc |= !set_one_rr(reg4, val4);
- BUG_ON(rc != 0);
- }
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU protection key register access routines
-**************************************************************************/
-
-IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval)
-{
-	if (reg >= XEN_IA64_NPKRS)
-		return IA64_RSVDREG_FAULT;	/* register index too large */
-
- *pval = (u64) PSCBX(vcpu, pkrs[reg]);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val)
-{
- ia64_pkr_t pkr_new;
-
- if (reg >= XEN_IA64_NPKRS)
-		return IA64_RSVDREG_FAULT;	/* index too large */
-
- pkr_new.val = val;
- if (pkr_new.reserved1)
- return IA64_RSVDREG_FAULT; /* reserved field */
-
- if (pkr_new.reserved2)
- return IA64_RSVDREG_FAULT; /* reserved field */
-
- PSCBX(vcpu, pkrs[reg]) = pkr_new.val;
- ia64_set_pkr(reg, pkr_new.val);
-
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU translation register access routines
-**************************************************************************/
-
-static void
-vcpu_set_tr_entry_rid(TR_ENTRY * trp, u64 pte,
- u64 itir, u64 ifa, u64 rid)
-{
- u64 ps;
- union pte_flags new_pte;
-
- trp->itir = itir;
- trp->rid = rid;
- ps = trp->ps;
- new_pte.val = pte;
- if (new_pte.pl < CONFIG_CPL0_EMUL)
- new_pte.pl = CONFIG_CPL0_EMUL;
- trp->vadr = ifa & ~0xfff;
- if (ps > 12) { // "ignore" relevant low-order bits
- new_pte.ppn &= ~((1UL << (ps - 12)) - 1);
- trp->vadr &= ~((1UL << ps) - 1);
- }
-
- /* Atomic write. */
- trp->pte.val = new_pte.val;
-}
-
-static inline void
-vcpu_set_tr_entry(TR_ENTRY * trp, u64 pte, u64 itir, u64 ifa)
-{
- vcpu_set_tr_entry_rid(trp, pte, itir, ifa,
- VCPU(current, rrs[ifa >> 61]) & RR_RID_MASK);
-}
-
-IA64FAULT vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte,
- u64 itir, u64 ifa)
-{
- TR_ENTRY *trp;
-
- if (slot >= NDTRS)
- return IA64_RSVDREG_FAULT;
-
- vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
-
- trp = &PSCBX(vcpu, dtrs[slot]);
-//printk("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
- vcpu_set_tr_entry(trp, pte, itir, ifa);
- vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), ifa);
-
- /*
-	 * FIXME: According to the spec, the vhpt should be purged here,
-	 * but that incurs a considerable performance loss.  Since it is
-	 * safe for linux not to purge the vhpt, the purge is disabled
-	 * until a feasible way is found.
- *
- * vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
- */
-
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte,
- u64 itir, u64 ifa)
-{
- TR_ENTRY *trp;
-
- if (slot >= NITRS)
- return IA64_RSVDREG_FAULT;
-
- vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
-
- trp = &PSCBX(vcpu, itrs[slot]);
-//printk("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
- vcpu_set_tr_entry(trp, pte, itir, ifa);
- vcpu_quick_region_set(PSCBX(vcpu, itr_regions), ifa);
-
- /*
-	 * FIXME: According to the spec, the vhpt should be purged here,
-	 * but that incurs a considerable performance loss.  Since it is
-	 * safe for linux not to purge the vhpt, the purge is disabled
-	 * until a feasible way is found.
- *
- * vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
- */
-
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_itr(VCPU * vcpu, u64 slot, u64 pte,
- u64 itir, u64 ifa, u64 rid)
-{
- TR_ENTRY *trp;
-
- if (slot >= NITRS)
- return IA64_RSVDREG_FAULT;
- trp = &PSCBX(vcpu, itrs[slot]);
- vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
-
- /* Recompute the itr_region. */
- vcpu->arch.itr_regions = 0;
- for (trp = vcpu->arch.itrs; trp < &vcpu->arch.itrs[NITRS]; trp++)
- if (trp->pte.p)
- vcpu_quick_region_set(vcpu->arch.itr_regions,
- trp->vadr);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 slot, u64 pte,
- u64 itir, u64 ifa, u64 rid)
-{
- TR_ENTRY *trp;
-
- if (slot >= NDTRS)
- return IA64_RSVDREG_FAULT;
- trp = &PSCBX(vcpu, dtrs[slot]);
- vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
-
- /* Recompute the dtr_region. */
- vcpu->arch.dtr_regions = 0;
- for (trp = vcpu->arch.dtrs; trp < &vcpu->arch.dtrs[NDTRS]; trp++)
- if (trp->pte.p)
- vcpu_quick_region_set(vcpu->arch.dtr_regions,
- trp->vadr);
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU translation cache access routines
-**************************************************************************/
-
-static void
-vcpu_rebuild_vhpt(VCPU * vcpu, u64 ps)
-{
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
- printk("vhpt rebuild: using page_shift %d\n", (int)ps);
- vcpu->arch.vhpt_pg_shift = ps;
- vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
- vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
- local_vhpt_flush();
- load_region_regs(vcpu);
-#else
- panic_domain(NULL, "domain trying to use smaller page size!\n");
-#endif
-}
-
-void
-vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
- u64 mp_pte, u64 itir, struct p2m_entry *entry)
-{
- ia64_itir_t _itir = {.itir = itir};
- unsigned long psr;
-
- check_xen_space_overlap("itc", vaddr, 1UL << _itir.ps);
-
-	// FIXME: must be inlined, or there is potential for a nested fault here!
- if ((vcpu->domain == dom0) && (_itir.ps < PAGE_SHIFT))
- panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
- "smaller page size!\n");
-
- BUG_ON(_itir.ps > PAGE_SHIFT);
- vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
- psr = ia64_clear_ic();
- pte &= ~(_PAGE_RV2 | _PAGE_RV1); // Mask out the reserved bits.
- // FIXME: look for bigger mappings
- ia64_itc(IorD, vaddr, pte, _itir.itir);
- ia64_set_psr(psr);
-	// ia64_srlz_i(); // no srlz req'd, will rfi later
- if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
- // FIXME: this is dangerous... vhpt_flush_address ensures these
- // addresses never get flushed. More work needed if this
- // ever happens.
-//printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
- if (_itir.ps > vcpu->arch.vhpt_pg_shift)
- vhpt_multiple_insert(vaddr, pte, _itir.itir);
- else
- vhpt_insert(vaddr, pte, _itir.itir);
- }
- // even if domain pagesize is larger than PAGE_SIZE, just put
- // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
- else {
- vhpt_insert(vaddr, pte, _itir.itir);
- }
-}
-
-IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
-{
- unsigned long pteval;
- BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
- struct p2m_entry entry;
- ia64_itir_t _itir = {.itir = itir};
-
- if (_itir.ps < vcpu->arch.vhpt_pg_shift)
- vcpu_rebuild_vhpt(vcpu, _itir.ps);
-
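-	/* translate_domain_pte() records the p2m entry used; if that entry
-	 * changed while the insert was in flight, flush the range and
-	 * retry (see p2m_entry_retry() below). */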
- again:
- //itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
- pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
- if (!pteval)
- return IA64_ILLOP_FAULT;
- if (swap_rr0)
- set_virtual_rr0();
- vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry);
- if (swap_rr0)
- set_metaphysical_rr0();
- if (p2m_entry_retry(&entry)) {
- vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
- goto again;
- }
- vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
-{
- unsigned long pteval;
- BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
- struct p2m_entry entry;
- ia64_itir_t _itir = {.itir = itir};
-
- if (_itir.ps < vcpu->arch.vhpt_pg_shift)
- vcpu_rebuild_vhpt(vcpu, _itir.ps);
-
- again:
- //itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
- pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
- if (!pteval)
- return IA64_ILLOP_FAULT;
- if (swap_rr0)
- set_virtual_rr0();
- vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry);
- if (swap_rr0)
- set_metaphysical_rr0();
- if (p2m_entry_retry(&entry)) {
- vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
- goto again;
- }
- vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa);
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 log_range)
-{
- BUG_ON(vcpu != current);
-
- check_xen_space_overlap("ptc_l", vadr, 1UL << log_range);
-
- /* Purge TC */
- vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
- vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
-
- /* Purge all tlb and vhpt */
- vcpu_flush_tlb_vhpt_range(vadr, log_range);
-
- return IA64_NO_FAULT;
-}
-
-// At privlvl=0, fc performs no access rights or protection key checks, while
-// at privlvl!=0, fc performs access rights checks as if it were a 1-byte
-// read but no protection key check. Thus in order to avoid an unexpected
-// access rights fault, we have to translate the virtual address to a
-// physical address (possibly via a metaphysical address) and do the fc
-// on the physical address, which is guaranteed to flush the same cache line
-IA64FAULT vcpu_fc(VCPU * vcpu, u64 vadr)
-{
- // TODO: Only allowed for current vcpu
- u64 mpaddr, paddr;
- IA64FAULT fault;
-
- again:
- fault = vcpu_tpa(vcpu, vadr, &mpaddr);
- if (fault == IA64_NO_FAULT) {
- struct p2m_entry entry;
- paddr = translate_domain_mpaddr(mpaddr, &entry);
- ia64_fc(__va(paddr));
- if (p2m_entry_retry(&entry))
- goto again;
- }
- return fault;
-}
-
-IA64FAULT vcpu_ptc_e(VCPU * vcpu, u64 vadr)
-{
-	// Note that this only needs to be called once, i.e. the
-	// architected loop to purge the entire TLB should use
-	// base = stride1 = stride2 = 0, count0 = count1 = 1
-
- vcpu_flush_vtlb_all(current);
-
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 addr_range)
-{
- printk("vcpu_ptc_g: called, not implemented yet\n");
- return IA64_ILLOP_FAULT;
-}
-
-IA64FAULT vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 addr_range)
-{
- // FIXME: validate not flushing Xen addresses
- // if (Xen address) return(IA64_ILLOP_FAULT);
- // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
-//printk("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
-
- check_xen_space_overlap("ptc_ga", vadr, addr_range);
-
- domain_flush_vtlb_range(vcpu->domain, vadr, addr_range);
-
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 log_range)
-{
- unsigned long region = vadr >> 61;
- u64 addr_range = 1UL << log_range;
- unsigned long rid, rr;
- int i;
- TR_ENTRY *trp;
-
- BUG_ON(vcpu != current);
- check_xen_space_overlap("ptr_d", vadr, 1UL << log_range);
-
- rr = PSCB(vcpu, rrs)[region];
- rid = rr & RR_RID_MASK;
-
- /* Purge TC */
- vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
-
- /* Purge tr and recompute dtr_regions. */
- vcpu->arch.dtr_regions = 0;
- for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++)
- if (vcpu_match_tr_entry_range
- (trp, rid, vadr, vadr + addr_range))
- vcpu_purge_tr_entry(trp);
- else if (trp->pte.p)
- vcpu_quick_region_set(vcpu->arch.dtr_regions,
- trp->vadr);
-
- vcpu_flush_tlb_vhpt_range(vadr, log_range);
-
- return IA64_NO_FAULT;
-}
-
-IA64FAULT vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 log_range)
-{
- unsigned long region = vadr >> 61;
- u64 addr_range = 1UL << log_range;
- unsigned long rid, rr;
- int i;
- TR_ENTRY *trp;
-
- BUG_ON(vcpu != current);
- check_xen_space_overlap("ptr_i", vadr, 1UL << log_range);
-
- rr = PSCB(vcpu, rrs)[region];
- rid = rr & RR_RID_MASK;
-
- /* Purge TC */
- vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
-
- /* Purge tr and recompute itr_regions. */
- vcpu->arch.itr_regions = 0;
- for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++)
- if (vcpu_match_tr_entry_range
- (trp, rid, vadr, vadr + addr_range))
- vcpu_purge_tr_entry(trp);
- else if (trp->pte.p)
- vcpu_quick_region_set(vcpu->arch.itr_regions,
- trp->vadr);
-
- vcpu_flush_tlb_vhpt_range(vadr, log_range);
-
- return IA64_NO_FAULT;
-}
diff --git a/xen/arch/ia64/xen/vhpt.c b/xen/arch/ia64/xen/vhpt.c
deleted file mode 100644
index 1c3e71cbbb..0000000000
--- a/xen/arch/ia64/xen/vhpt.c
+++ /dev/null
@@ -1,585 +0,0 @@
-/*
- * Initialize VHPT support.
- *
- * Copyright (C) 2004 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * per vcpu vhpt support
- */
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/pgalloc.h>
-#include <asm/page.h>
-#include <asm/vhpt.h>
-#include <asm/vcpu.h>
-#include <asm/vcpumask.h>
-#include <asm/vmmu.h>
-
-DEFINE_PER_CPU_READ_MOSTLY(unsigned long, vhpt_paddr);
-DEFINE_PER_CPU_READ_MOSTLY(unsigned long, vhpt_pend);
-#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
-DEFINE_PER_CPU(volatile u32, vhpt_tlbflush_timestamp);
-#endif
-
-static void
-__vhpt_flush(unsigned long vhpt_maddr, unsigned long vhpt_size_log2)
-{
- struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr);
- unsigned long num_entries = 1 << (vhpt_size_log2 - 5);
- int i;
-
- for (i = 0; i < num_entries; i++, v++)
- v->ti_tag = INVALID_TI_TAG;
-}
-
-void
-local_vhpt_flush(void)
-{
- /* increment flush clock before flush */
- u32 flush_time = tlbflush_clock_inc_and_return();
- __vhpt_flush(__ia64_per_cpu_var(vhpt_paddr), VHPT_SIZE_LOG2);
- /* this must be after flush */
- tlbflush_update_time(&__get_cpu_var(vhpt_tlbflush_timestamp),
- flush_time);
- perfc_incr(local_vhpt_flush);
-}
-
-void
-vcpu_vhpt_flush(struct vcpu* v)
-{
- unsigned long vhpt_size_log2 = VHPT_SIZE_LOG2;
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
- if (HAS_PERVCPU_VHPT(v->domain))
- vhpt_size_log2 = v->arch.pta.size;
-#endif
- __vhpt_flush(vcpu_vhpt_maddr(v), vhpt_size_log2);
- perfc_incr(vcpu_vhpt_flush);
-}
-
-static void
-vhpt_erase(unsigned long vhpt_maddr, unsigned long vhpt_size_log2)
-{
- struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr);
- unsigned long num_entries = 1 << (vhpt_size_log2 - 5);
- int i;
-
- for (i = 0; i < num_entries; i++, v++) {
- v->itir = 0;
- v->CChain = 0;
- v->page_flags = 0;
- v->ti_tag = INVALID_TI_TAG;
- }
- // initialize cache too???
-}
-
-void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long itir)
-{
- struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
- unsigned long tag = ia64_ttag (vadr);
-
- /* Even though VHPT is per VCPU, still need to first disable the entry,
- * because the processor may support speculative VHPT walk. */
- vlfe->ti_tag = INVALID_TI_TAG;
- wmb();
- vlfe->itir = itir;
- vlfe->page_flags = pte | _PAGE_P;
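-	/* Write the tag last, with a single volatile store, so a concurrent
-	 * walker never sees a valid tag paired with stale itir/pte. */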
- *(volatile unsigned long*)&vlfe->ti_tag = tag;
-}
-
-void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
- unsigned long itir)
-{
- unsigned char ps = current->arch.vhpt_pg_shift;
- ia64_itir_t _itir = {.itir = itir};
- unsigned long mask = (1L << _itir.ps) - 1;
- int i;
-
- if (_itir.ps - ps > 10 && !running_on_sim) {
- // if this happens, we may want to revisit this algorithm
- panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
- }
- if (_itir.ps - ps > 2) {
- // FIXME: Should add counter here to see how often this
- // happens (e.g. for 16MB pages!) and determine if it
- // is a performance problem. On a quick look, it takes
- // about 39000 instrs for a 16MB page and it seems to occur
- // only a few times/second, so OK for now.
- // An alternate solution would be to just insert the one
- // 16KB in the vhpt (but with the full mapping)?
- //printk("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
- //"va=%p, pa=%p, pa-masked=%p\n",
- //logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
- //(pte&_PFN_MASK)&~mask);
- }
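-	/* Align down to the large page, then insert one base-page entry
-	 * per 2^(_itir.ps - ps) base pages covering it. */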
- vaddr &= ~mask;
- pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
- for (i = 1L << (_itir.ps - ps); i > 0; i--) {
- vhpt_insert(vaddr, pte, _itir.itir);
- vaddr += (1L << ps);
- }
-}
-
-void __init vhpt_init(void)
-{
- unsigned long paddr;
- struct page_info *page;
-#if !VHPT_ENABLED
- return;
-#endif
-	/* This allocation is only valid if the vhpt table is shared by
-	 * all domains; otherwise a new vhpt table would have to be
-	 * allocated from the domain heap when each domain is created.
-	 * We assume the xen buddy allocator can provide a naturally
-	 * aligned page for the requested order.
-	 */
- page = alloc_domheap_pages(NULL, VHPT_SIZE_LOG2 - PAGE_SHIFT, 0);
- if (!page)
- panic("vhpt_init: can't allocate VHPT!\n");
- paddr = page_to_maddr(page);
- if (paddr & ((1 << VHPT_SIZE_LOG2) - 1))
- panic("vhpt_init: bad VHPT alignment!\n");
- __get_cpu_var(vhpt_paddr) = paddr;
- __get_cpu_var(vhpt_pend) = paddr + (1 << VHPT_SIZE_LOG2) - 1;
- printk(XENLOG_DEBUG "vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
- paddr, __get_cpu_var(vhpt_pend));
- vhpt_erase(paddr, VHPT_SIZE_LOG2);
- // we don't enable VHPT here.
- // context_switch() or schedule_tail() does it.
-}
-
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
-void
-domain_set_vhpt_size(struct domain *d, int8_t vhpt_size_log2)
-{
- if (vhpt_size_log2 == -1) {
- d->arch.has_pervcpu_vhpt = 0;
- printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
- "domain %d VHPT is global.\n", d->domain_id);
- } else {
- d->arch.has_pervcpu_vhpt = 1;
- d->arch.vhpt_size_log2 = vhpt_size_log2;
- printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
- "domain %d VHPT is per vcpu. size=2**%d\n",
- d->domain_id, vhpt_size_log2);
- }
-}
-
-int
-pervcpu_vhpt_alloc(struct vcpu *v)
-{
- unsigned long vhpt_size_log2 = VHPT_SIZE_LOG2;
-
- if (v->domain->arch.vhpt_size_log2 > 0)
- vhpt_size_log2 =
- canonicalize_vhpt_size(v->domain->arch.vhpt_size_log2);
- printk(XENLOG_DEBUG "%s vhpt_size_log2=%ld\n",
- __func__, vhpt_size_log2);
- v->arch.vhpt_entries =
- (1UL << vhpt_size_log2) / sizeof(struct vhpt_lf_entry);
- v->arch.vhpt_page =
- alloc_domheap_pages(NULL, vhpt_size_log2 - PAGE_SHIFT, 0);
- if (!v->arch.vhpt_page)
- return -ENOMEM;
-
- v->arch.vhpt_maddr = page_to_maddr(v->arch.vhpt_page);
- if (v->arch.vhpt_maddr & ((1 << VHPT_SIZE_LOG2) - 1))
- panic("pervcpu_vhpt_init: bad VHPT alignment!\n");
-
- v->arch.pta.val = 0; // to zero reserved bits
- v->arch.pta.ve = 1; // enable vhpt
- v->arch.pta.size = vhpt_size_log2;
- v->arch.pta.vf = 1; // long format
- v->arch.pta.base = __va_ul(v->arch.vhpt_maddr) >> 15;
-
- vhpt_erase(v->arch.vhpt_maddr, vhpt_size_log2);
- smp_mb(); // per vcpu vhpt may be used by another physical cpu.
- return 0;
-}
-
-void
-pervcpu_vhpt_free(struct vcpu *v)
-{
- if (likely(v->arch.vhpt_page != NULL))
- free_domheap_pages(v->arch.vhpt_page,
- v->arch.pta.size - PAGE_SHIFT);
-}
-#endif
-
-void
-domain_purge_swtc_entries(struct domain *d)
-{
- struct vcpu* v;
- for_each_vcpu(d, v) {
- if (!v->is_initialised)
- continue;
-
- /* Purge TC entries.
- FIXME: clear only if match. */
- vcpu_purge_tr_entry(&PSCBX(v,dtlb));
- vcpu_purge_tr_entry(&PSCBX(v,itlb));
- }
-}
-
-void
-domain_purge_swtc_entries_vcpu_dirty_mask(struct domain* d,
- vcpumask_t vcpu_dirty_mask)
-{
- int vcpu;
-
- for_each_vcpu_mask(d, vcpu, vcpu_dirty_mask) {
- struct vcpu* v = d->vcpu[vcpu];
- if (!v->is_initialised)
- continue;
-
- /* Purge TC entries.
- FIXME: clear only if match. */
- vcpu_purge_tr_entry(&PSCBX(v, dtlb));
- vcpu_purge_tr_entry(&PSCBX(v, itlb));
- }
-}
-
-// SMP: we can't assume v == current; the vcpu might move to another
-// physical cpu, so a memory barrier is necessary.
-// If we can guarantee that the vcpu runs only on this physical cpu
-// (e.g. vcpu == current), smp_mb() is unnecessary.
-void vcpu_flush_vtlb_all(struct vcpu *v)
-{
- /* First VCPU tlb. */
- vcpu_purge_tr_entry(&PSCBX(v,dtlb));
- vcpu_purge_tr_entry(&PSCBX(v,itlb));
- smp_mb();
-
- /* Then VHPT. */
- if (HAS_PERVCPU_VHPT(v->domain))
- vcpu_vhpt_flush(v);
- else
- local_vhpt_flush();
- smp_mb();
-
- /* Then mTLB. */
- local_flush_tlb_all();
-
-	/* We could clear the bit in d->domain_dirty_cpumask only if domain
-	   d is not running on this processor.  There is currently no easy
-	   way to check this. */
-
- perfc_incr(vcpu_flush_vtlb_all);
-}
-
-static void __vcpu_flush_vtlb_all(void *vcpu)
-{
- vcpu_flush_vtlb_all((struct vcpu*)vcpu);
-}
-
-// The caller must have incremented d's reference count somehow.
-void domain_flush_vtlb_all(struct domain* d)
-{
- int cpu = smp_processor_id ();
- struct vcpu *v;
-
- for_each_vcpu(d, v) {
- if (!v->is_initialised)
- continue;
-
- if (VMX_DOMAIN(v)) {
-			// This code may be called when remapping shared_info
-			// and grant_table from guest_physmap_remove_page()
-			// in arch_memory_op() XENMEM_add_to_physmap to
-			// realize the PV-on-HVM feature.
- vmx_vcpu_flush_vtlb_all(v);
- continue;
- }
-
- if (v->processor == cpu)
- vcpu_flush_vtlb_all(v);
- else
-			// SMP: it is racy to reference v->processor.
-			// The vcpu scheduler may move this vcpu to another
-			// physical processor and change the value with a
-			// plain store, so we may be seeing a stale value.
-			// In that case, flush_vtlb_for_context_switch()
-			// takes care of the mTLB flush.
- smp_call_function_single(v->processor,
- __vcpu_flush_vtlb_all,
- v, 1);
- }
- perfc_incr(domain_flush_vtlb_all);
-}
-
-// Callers may need to call smp_mb() before/after calling this.
-// Be careful.
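-// Note: the entry offset is computed with ia64_thash(), which hashes via
-// the current PTA, and is then rebased onto vhpt_maddr; this assumes the
-// target VHPT has the same size the current PTA describes.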
-static void
-__flush_vhpt_range(unsigned long vhpt_maddr, u64 vadr, u64 addr_range)
-{
- void *vhpt_base = __va(vhpt_maddr);
- u64 pgsz = 1L << current->arch.vhpt_pg_shift;
- u64 purge_addr = vadr & PAGE_MASK;
-
- addr_range += vadr - purge_addr;
- addr_range = PAGE_ALIGN(addr_range);
- while ((long)addr_range > 0) {
- /* Get the VHPT entry. */
- unsigned int off = ia64_thash(purge_addr) -
- __va_ul(vcpu_vhpt_maddr(current));
- struct vhpt_lf_entry *v = vhpt_base + off;
- v->ti_tag = INVALID_TI_TAG;
- addr_range -= pgsz;
- purge_addr += pgsz;
- }
-}
-
-static void
-cpu_flush_vhpt_range(int cpu, u64 vadr, u64 addr_range)
-{
- __flush_vhpt_range(per_cpu(vhpt_paddr, cpu), vadr, addr_range);
-}
-
-static void
-vcpu_flush_vhpt_range(struct vcpu* v, u64 vadr, u64 addr_range)
-{
- __flush_vhpt_range(vcpu_vhpt_maddr(v), vadr, addr_range);
-}
-
-void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range)
-{
- if (HAS_PERVCPU_VHPT(current->domain))
- vcpu_flush_vhpt_range(current, vadr, 1UL << log_range);
- else
- cpu_flush_vhpt_range(current->processor,
- vadr, 1UL << log_range);
- ia64_ptcl(vadr, log_range << 2);
- ia64_srlz_i();
- perfc_incr(vcpu_flush_tlb_vhpt_range);
-}
-
-void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
-{
- struct vcpu *v;
-
-#if 0
- // this only seems to occur at shutdown, but it does occur
- if ((!addr_range) || addr_range & (addr_range - 1)) {
- printk("vhpt_flush_address: weird range, spinning...\n");
- while(1);
- }
-#endif
-
- domain_purge_swtc_entries(d);
- smp_mb();
-
- for_each_vcpu (d, v) {
- if (!v->is_initialised)
- continue;
-
- if (HAS_PERVCPU_VHPT(d)) {
- vcpu_flush_vhpt_range(v, vadr, addr_range);
- } else {
-			// SMP: it is racy to reference v->processor.
-			// The vcpu scheduler may move this vcpu to another
-			// physical processor and change the value with a
-			// plain store, so we may be seeing a stale value.
-			// In that case, flush_vtlb_for_context_switch()
-			// takes care of the mTLB flush.
- /* Invalidate VHPT entries. */
- cpu_flush_vhpt_range(v->processor, vadr, addr_range);
- }
- }
-
-	/* ptc.ga (has release semantics). */
- platform_global_tlb_purge(vadr, vadr + addr_range,
- current->arch.vhpt_pg_shift);
- perfc_incr(domain_flush_vtlb_range);
-}
-
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
-#include <asm/tlb_track.h>
-#include <asm/vmx_vcpu.h>
-void
-__domain_flush_vtlb_track_entry(struct domain* d,
- const struct tlb_track_entry* entry)
-{
- unsigned long rr7_rid;
- int swap_rr0 = 0;
- unsigned long old_rid;
- unsigned long vaddr = entry->vaddr;
- struct vcpu* v;
- int cpu;
- int vcpu;
- int local_purge = 1;
-
-	/* TLB insert tracking is done in PAGE_SIZE units. */
- unsigned char ps = max_t(unsigned char,
- current->arch.vhpt_pg_shift, PAGE_SHIFT);
- /* This case isn't supported (yet). */
- BUG_ON(current->arch.vhpt_pg_shift > PAGE_SHIFT);
-
- BUG_ON((vaddr >> VRN_SHIFT) != VRN7);
- /*
- * heuristic:
-	 * dom0linux accesses grant-mapped pages via the kernel
-	 * straight-mapped area and doesn't change the rr7 rid.
-	 * So it is likely that rr7 == entry->rid, in which case
-	 * we can avoid the rid change.
-	 * When blktap is supported, this heuristic should be revised.
- */
- vcpu_get_rr(current, VRN7 << VRN_SHIFT, &rr7_rid);
- if (likely(rr7_rid == entry->rid)) {
- perfc_incr(tlb_track_use_rr7);
- } else {
- swap_rr0 = 1;
- vaddr = (vaddr << 3) >> 3;// force vrn0
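-		// ((vaddr << 3) >> 3) clears bits 63:61 (the VRN field),
-		// so the region-7 address is re-issued in region 0, whose
-		// rr0 is temporarily loaded with entry->rid below.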
- perfc_incr(tlb_track_swap_rr0);
- }
-
- // tlb_track_entry_printf(entry);
- if (swap_rr0) {
- vcpu_get_rr(current, 0, &old_rid);
- vcpu_set_rr(current, 0, entry->rid);
- }
-
- if (HAS_PERVCPU_VHPT(d)) {
- for_each_vcpu_mask(d, vcpu, entry->vcpu_dirty_mask) {
- v = d->vcpu[vcpu];
- if (!v->is_initialised)
- continue;
-
- /* Invalidate VHPT entries. */
- vcpu_flush_vhpt_range(v, vaddr, 1L << ps);
-
- /*
- * current->processor == v->processor
-			 * is racy.  We may see an old v->processor, and
-			 * v's new physical processor might see the old
-			 * vhpt entry and insert it into its tlb.
- */
- if (v != current)
- local_purge = 0;
- }
- } else {
- for_each_cpu(cpu, &entry->pcpu_dirty_mask) {
- /* Invalidate VHPT entries. */
- cpu_flush_vhpt_range(cpu, vaddr, 1L << ps);
-
- if (d->vcpu[cpu] != current)
- local_purge = 0;
- }
- }
-
- /* ptc.ga */
- if (local_purge) {
- ia64_ptcl(vaddr, ps << 2);
- perfc_incr(domain_flush_vtlb_local);
- } else {
- /* ptc.ga has release semantics. */
- platform_global_tlb_purge(vaddr, vaddr + (1L << ps), ps);
- perfc_incr(domain_flush_vtlb_global);
- }
-
- if (swap_rr0) {
- vcpu_set_rr(current, 0, old_rid);
- }
- perfc_incr(domain_flush_vtlb_track_entry);
-}
-
-void
-domain_flush_vtlb_track_entry(struct domain* d,
- const struct tlb_track_entry* entry)
-{
- domain_purge_swtc_entries_vcpu_dirty_mask(d, entry->vcpu_dirty_mask);
- smp_mb();
-
- __domain_flush_vtlb_track_entry(d, entry);
-}
-
-#endif
-
-static void flush_tlb_vhpt_all (struct domain *d)
-{
- /* First VHPT. */
- local_vhpt_flush ();
-
- /* Then mTLB. */
- local_flush_tlb_all ();
-}
-
-void domain_flush_tlb_vhpt(struct domain *d)
-{
- /* Very heavy... */
- if (HAS_PERVCPU_VHPT(d) || is_hvm_domain(d))
- on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
- else
- on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
- cpumask_clear(d->domain_dirty_cpumask);
-}
-
-void flush_tlb_for_log_dirty(struct domain *d)
-{
- struct vcpu *v;
-
- /* NB. There is no race because all vcpus are paused. */
- if (is_hvm_domain(d)) {
- for_each_vcpu (d, v) {
- if (!v->is_initialised)
- continue;
- /* XXX: local_flush_tlb_all is called redundantly */
- thash_purge_all(v);
- }
- smp_call_function((void (*)(void *))local_flush_tlb_all,
- NULL, 1);
- } else if (HAS_PERVCPU_VHPT(d)) {
- for_each_vcpu (d, v) {
- if (!v->is_initialised)
- continue;
- vcpu_purge_tr_entry(&PSCBX(v,dtlb));
- vcpu_purge_tr_entry(&PSCBX(v,itlb));
- vcpu_vhpt_flush(v);
- }
- on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
- } else {
- on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
- }
- cpumask_clear(d->domain_dirty_cpumask);
-}
-
-void flush_tlb_mask(const cpumask_t *mask)
-{
- int cpu;
-
- cpu = smp_processor_id();
- if (cpumask_test_cpu(cpu, mask))
- flush_tlb_vhpt_all (NULL);
-
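-	/* Done if the local cpu was the only cpu in the mask. */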
- if (cpumask_subset(mask, cpumask_of(cpu)))
- return;
-
- for_each_cpu (cpu, mask)
- if (cpu != smp_processor_id())
- smp_call_function_single
- (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1);
-}
-
-#ifdef PERF_COUNTERS
-void gather_vhpt_stats(void)
-{
- int i, cpu;
-
- perfc_set(vhpt_nbr_entries, VHPT_NUM_ENTRIES);
-
- for_each_present_cpu (cpu) {
- struct vhpt_lf_entry *v = __va(per_cpu(vhpt_paddr, cpu));
- unsigned long vhpt_valid = 0;
-
- for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
- if (!(v->ti_tag & INVALID_TI_TAG))
- vhpt_valid++;
- per_cpu(perfcounters, cpu)[PERFC_vhpt_valid_entries] = vhpt_valid;
- }
-}
-#endif
diff --git a/xen/arch/ia64/xen/xen.lds.S b/xen/arch/ia64/xen/xen.lds.S
deleted file mode 100644
index 46e9421743..0000000000
--- a/xen/arch/ia64/xen/xen.lds.S
+++ /dev/null
@@ -1,283 +0,0 @@
-#include <linux/config.h>
-
-#include <asm/cache.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/pgtable.h>
-
-#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
-#include <asm-generic/vmlinux.lds.h>
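-
-/* Each AT(ADDR(section) - LOAD_OFFSET) clause below links a section at
- * its virtual address while placing its load address at the matching
- * physical location. */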
-
-OUTPUT_FORMAT("elf64-ia64-little")
-OUTPUT_ARCH(ia64)
-ENTRY(phys_start)
-jiffies = jiffies_64;
-PHDRS {
- code PT_LOAD;
- percpu PT_LOAD;
- data PT_LOAD;
-}
-SECTIONS
-{
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.exit.text)
- *(.exit.data)
- *(.exitcall.exit)
- *(.IA_64.unwind.exit.text)
- *(.IA_64.unwind_info.exit.text)
- }
-
- v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
- phys_start = _start - LOAD_OFFSET;
-
- code : { } :code
- . = KERNEL_START;
-
- _text = .;
- _stext = .;
-
- .text : AT(ADDR(.text) - LOAD_OFFSET)
- {
- *(.text.ivt)
- *(.text)
- SCHED_TEXT
- LOCK_TEXT
- *(.gnu.linkonce.t*)
- }
- .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
- { *(.text2) }
-#ifdef CONFIG_SMP
- .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
- { *(.text.lock) }
-#endif
- _etext = .;
-
- /* Read-only data */
-
- /* Exception table */
- . = ALIGN(16);
- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
- {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
-
- .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
- {
- __start___vtop_patchlist = .;
- *(.data.patch.vtop)
- __end___vtop_patchlist = .;
- }
-
- .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
- {
- __start___mckinley_e9_bundles = .;
- *(.data.patch.mckinley_e9)
- __end___mckinley_e9_bundles = .;
- }
-
- /* Global data */
- _data = .;
-
-#if defined(CONFIG_IA64_GENERIC)
- /* Machine Vector */
- . = ALIGN(16);
- .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
- {
- machvec_start = .;
- *(.machvec)
- machvec_end = .;
- }
-#endif
-
- /* Unwind info & table: */
- . = ALIGN(8);
- .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
- { *(.IA_64.unwind_info*) }
- .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
- {
- __start_unwind = .;
- *(.IA_64.unwind*)
- __end_unwind = .;
- }
-
- RODATA
-
- .opd : AT(ADDR(.opd) - LOAD_OFFSET)
- { *(.opd) }
-
- /* Initialization code and data: */
-
- . = ALIGN(PAGE_SIZE);
- __init_begin = .;
- .init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
- {
- _sinittext = .;
- *(.init.text)
- _einittext = .;
- }
-
- .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
- {
- *(.init.rodata)
- *(.init.rodata.str*)
- *(.init.data)
- *(.init.data.rel)
- *(.init.data.rel.*)
- }
-
- .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
- {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- }
-
- . = ALIGN(16);
- .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
- {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
- .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
- {
- __initcall_start = .;
- *(.initcallpresmp.init)
- __presmp_initcall_end = .;
- *(.initcall1.init)
- *(.initcall2.init)
- *(.initcall3.init)
- *(.initcall4.init)
- *(.initcall5.init)
- *(.initcall6.init)
- *(.initcall7.init)
- __initcall_end = .;
- }
- __con_initcall_start = .;
- .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
- { *(.con_initcall.init) }
- __con_initcall_end = .;
- __security_initcall_start = .;
- .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET)
- { *(.security_initcall.init) }
- __security_initcall_end = .;
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
-
- /* The initial task and kernel stack */
- .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
- { *(.data.init_task) }
-
- .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
- { *(__special_page_section)
- __start_gate_section = .;
- *(.data.gate)
- __stop_gate_section = .;
- }
- . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose
- * kernel data
- */
-
- .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
- { *(.data.read_mostly) }
-
- .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
- { *(.data.cacheline_aligned) }
-
- /* Per-cpu data: */
- percpu : { } :percpu
- . = ALIGN(PERCPU_PAGE_SIZE);
- __phys_per_cpu_start = .;
- .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
- {
- __per_cpu_start = .;
- *(.data.percpu)
- . = ALIGN(SMP_CACHE_BYTES);
- *(.data.percpu.read_mostly)
- __per_cpu_end = .;
- }
- . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
- * into percpu page size
- */
-
- data : { } :data
- .data : AT(ADDR(.data) - LOAD_OFFSET)
- {
-#ifdef CONFIG_SMP
- . = ALIGN(PERCPU_PAGE_SIZE);
- __cpu0_per_cpu = .;
- . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
-#endif
- *(.data)
- *(.data1)
- *(.gnu.linkonce.d*)
- CONSTRUCTORS
- }
-
- . = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
- .got : AT(ADDR(.got) - LOAD_OFFSET)
- { *(.got.plt) *(.got) }
- __gp = ADDR(.got) + 0x200000;
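-  /* gp is biased by 2MB into the .got so that the signed 22-bit
-     gp-relative addressing mode can reach 2MB on either side of it. */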
- /* We want the small data sections together, so single-instruction offsets
- can access them all, and initialized data all before uninitialized, so
- we can shorten the on-disk segment size. */
- .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
- { *(.sdata) *(.sdata1) *(.srdata) }
- _edata = .;
- _bss = .;
- .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
- { *(.sbss) *(.scommon) }
- .bss : AT(ADDR(.bss) - LOAD_OFFSET)
- {
- . = ALIGN(PAGE_SIZE);
- *(.bss.page_aligned)
- *(.bss)
- *(COMMON)
- }
-
- _end = .;
-
- code : { } :code
- /* Stabs debugging sections. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- /* DWARF debug sections.
- Symbols in the DWARF debugging sections are relative to the beginning
- of the section so we begin them at 0. */
- /* DWARF 1 */
- .debug 0 : { *(.debug) }
- .line 0 : { *(.line) }
- /* GNU DWARF 1 extensions */
- .debug_srcinfo 0 : { *(.debug_srcinfo) }
- .debug_sfnames 0 : { *(.debug_sfnames) }
- /* DWARF 1.1 and DWARF 2 */
- .debug_aranges 0 : { *(.debug_aranges) }
- .debug_pubnames 0 : { *(.debug_pubnames) }
- /* DWARF 2 */
- .debug_info 0 : { *(.debug_info) }
- .debug_abbrev 0 : { *(.debug_abbrev) }
- .debug_line 0 : { *(.debug_line) }
- .debug_frame 0 : { *(.debug_frame) }
- .debug_str 0 : { *(.debug_str) }
- .debug_loc 0 : { *(.debug_loc) }
- .debug_macinfo 0 : { *(.debug_macinfo) }
- /* SGI/MIPS DWARF 2 extensions */
- .debug_weaknames 0 : { *(.debug_weaknames) }
- .debug_funcnames 0 : { *(.debug_funcnames) }
- .debug_typenames 0 : { *(.debug_typenames) }
- .debug_varnames 0 : { *(.debug_varnames) }
- /* These must appear regardless of . */
- /* Discard them for now since Intel SoftSDV cannot handle them.
- .comment 0 : { *(.comment) }
- .note 0 : { *(.note) }
- */
- /DISCARD/ : { *(.comment) }
- /DISCARD/ : { *(.note) }
-}
diff --git a/xen/arch/ia64/xen/xenasm.S b/xen/arch/ia64/xen/xenasm.S
deleted file mode 100644
index 9ce50fef65..0000000000
--- a/xen/arch/ia64/xen/xenasm.S
+++ /dev/null
@@ -1,649 +0,0 @@
-/*
- * Assembly support routines for Xen/ia64
- *
- * Copyright (C) 2004 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- *
- * Copyright (C) 2007 VA Linux Systems Japan K.K.
- * Isaku Yamahata <yamahata at valinux co jp>
- * ia64_copy_rbs()
- */
-
-#include <linux/config.h>
-#include <asm/asmmacro.h>
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-#include <asm/vhpt.h>
-#include <asm/asm-xsi-offsets.h>
-#include <asm/vmmu.h>
-#include <public/xen.h>
-
-// Change rr7 to the passed value while ensuring
-// Xen is mapped into the new region.
-#define PSR_BITS_TO_CLEAR \
- (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
- IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
- IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_IC)
-// FIXME? Note that this turns off the DB bit (debug)
-#define PSR_BITS_TO_SET IA64_PSR_BN
-
-//extern void ia64_new_rr7(unsigned long rid, /* in0 */
-// void *shared_info, /* in1 */
-// void *shared_arch_info, /* in2 */
-// unsigned long shared_info_va, /* in3 */
-// unsigned long va_vhpt) /* in4 */
-//Local usage:
-// loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
-// loc5=shared_archinfo_paddr, loc6=xen_paddr,
-// r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
-// (All locals loc0-loc6 are in use.)
-GLOBAL_ENTRY(ia64_new_rr7)
- // FIXME? not sure this unwind statement is correct...
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 5, 7, 0, 0
- movl loc2=PERCPU_ADDR
-1: {
- mov loc3 = psr // save psr
- mov loc0 = rp // save rp
- mov r8 = ip // save ip to compute branch
- };;
- .body
- tpa loc2=loc2 // grab this BEFORE changing rr7
- tpa in1=in1 // grab shared_info BEFORE changing rr7
- adds r8 = 1f-1b,r8 // calculate return address for call
- ;;
- tpa loc5=in2 // grab arch_vcpu_info BEFORE chg rr7
- movl r17=PSR_BITS_TO_SET
- mov loc4=ar.rsc // save RSE configuration
- movl r16=PSR_BITS_TO_CLEAR
- ;;
- tpa r8=r8 // convert rp to physical
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- or loc3=loc3,r17 // add in psr the bits to set
- ;;
-
- andcm r16=loc3,r16 // removes bits to clear from psr
- dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
- br.call.sptk.many rp=ia64_switch_mode_phys
-1:
- // now in physical mode with psr.i/ic off so do rr7 switch
-	dep r16=-1,r0,61,3		// Note: belongs to region 7!
- ;;
- mov rr[r16]=in0
- ;;
- srlz.d
- ;;
- movl r26=PAGE_KERNEL
- ;;
-
- // re-pin mappings for kernel text and data
- mov r24=KERNEL_TR_PAGE_SHIFT<<2
- movl r17=KERNEL_START
- ;;
- ptr.i r17,r24
- ptr.d r17,r24
- mov r16=IA64_TR_KERNEL
- mov cr.itir=r24
- mov cr.ifa=r17
- or r18=loc6,r26
- ;;
- itr.i itr[r16]=r18
- ;;
- itr.d dtr[r16]=r18
- ;;
-
- // re-pin mappings for stack (current)
- mov r25=IA64_GRANULE_SHIFT<<2
- dep r21=0,r13,60,4 // physical address of "current"
- ;;
- ptr.d r13,r25
- or r23=r21,r26 // construct PA | page properties
- mov cr.itir=r25
- mov cr.ifa=r13 // VA of next task...
- mov r21=IA64_TR_CURRENT_STACK
- ;;
- itr.d dtr[r21]=r23 // wire in new mapping...
-
- // Per-cpu
- mov r24=PERCPU_PAGE_SHIFT<<2
- movl r22=PERCPU_ADDR
- ;;
- ptr.d r22,r24
- or r23=loc2,r26 // construct PA | page properties
- mov cr.itir=r24
- mov cr.ifa=r22
- mov r25=IA64_TR_PERCPU_DATA
- ;;
- itr.d dtr[r25]=r23 // wire in new mapping...
-
- // VHPT
-#if VHPT_ENABLED
-#if IA64_GRANULE_SHIFT < VHPT_SIZE_LOG2
-#error "it must be that VHPT_SIZE_LOG2 <= IA64_GRANULE_SHIFT"
-#endif
-	// unless it overlaps with IA64_TR_CURRENT_STACK
- dep r15=0,in4,0,IA64_GRANULE_SHIFT
- dep r21=0,r13,0,IA64_GRANULE_SHIFT
- ;;
- cmp.eq p8,p0=r15,r21
-(p8) br.cond.sptk .vhpt_overlaps
- mov r21=IA64_TR_VHPT
- dep r22=0,r15,60,4 // physical address of
- // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
- mov r24=IA64_GRANULE_SHIFT<<2
- ;;
- ptr.d r15,r24
- or r23=r22,r26 // construct PA | page properties
- mov cr.itir=r24
- mov cr.ifa=r15
- srlz.d
- ;;
- itr.d dtr[r21]=r23 // wire in new mapping...
-.vhpt_overlaps:
-#endif
-
- // Shared info
- mov r24=XSI_SHIFT<<2
- movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RW)
- ;;
- ptr.d in3,r24
- or r23=in1,r25 // construct PA | page properties
- mov cr.itir=r24
- mov cr.ifa=in3
- mov r21=IA64_TR_SHARED_INFO
- ;;
- itr.d dtr[r21]=r23 // wire in new mapping...
-
- // Map mapped_regs
- mov r22=XMAPPEDREGS_OFS
- mov r24=XMAPPEDREGS_SHIFT<<2
- ;;
- add r22=r22,in3
- ;;
- ptr.d r22,r24
- or r23=loc5,r25 // construct PA | page properties
- mov cr.itir=r24
- mov cr.ifa=r22
- mov r21=IA64_TR_MAPPED_REGS
- ;;
- itr.d dtr[r21]=r23 // wire in new mapping...
-
- // done, switch back to virtual and return
- mov r16=loc3 // r16= original psr
- br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
- mov psr.l = loc3 // restore init PSR
-
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- mov ar.rsc=loc4 // restore RSE configuration
-	srlz.d				// serialize restoration of psr.l
- br.ret.sptk.many rp
-END(ia64_new_rr7)
-
-
- /* ia64_new_rr7_efi:
- * in0 = rid
- * in1 = repin_percpu
- * in2 = VPD vaddr
- *
-  * There seems to be no need to repin: palcode, mapped_regs
-  * or vhpt.  If they do need to be repinned then special care
-  * needs to be taken to track the correct value to repin.
-  * Those are generally the values that were most recently pinned by
-  * ia64_new_rr7.
-  *
-  * This function could probably be merged with ia64_new_rr7
-  * as it is just a trimmed-down version of that function.
-  * However, current can change without repinning occurring,
-  * so simply getting the values from current does not work correctly.
- */
-
-GLOBAL_ENTRY(ia64_new_rr7_efi)
- // FIXME? not sure this unwind statement is correct...
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 3, 7, 0, 0
- movl loc2=PERCPU_ADDR
-1: {
- mov loc3 = psr // save psr
- mov loc0 = rp // save rp
- mov r8 = ip // save ip to compute branch
- };;
- .body
- tpa loc2=loc2 // grab this BEFORE changing rr7
- adds r8 = 1f-1b,r8 // calculate return address for call
- ;;
- movl r17=PSR_BITS_TO_SET
- mov loc4=ar.rsc // save RSE configuration
- movl r16=PSR_BITS_TO_CLEAR
- ;;
- tpa r8=r8 // convert rp to physical
- mov ar.rsc=0 // put RSE in enforced lazy, LE mode
- or loc3=loc3,r17 // add in psr the bits to set
- ;;
- dep loc6 = 0,in2,60,4 // get physical address of VPD
- ;;
- dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
- // mask granule shift
- ;;
- andcm r16=loc3,r16 // removes bits to clear from psr
- dep loc5=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
- br.call.sptk.many rp=ia64_switch_mode_phys
-1:
- movl r26=PAGE_KERNEL
- // now in physical mode with psr.i/ic off so do rr7 switch
- dep r16=-1,r0,61,3
- ;;
- mov rr[r16]=in0
- ;;
- srlz.d
-
- // re-pin mappings for kernel text and data
- mov r24=KERNEL_TR_PAGE_SHIFT<<2
- movl r17=KERNEL_START
- ;;
- ptr.i r17,r24
- ;;
- ptr.d r17,r24
- ;;
- srlz.i
- ;;
- srlz.d
- ;;
- mov r16=IA64_TR_KERNEL
- mov cr.itir=r24
- mov cr.ifa=r17
- or r18=loc5,r26
- ;;
- itr.i itr[r16]=r18
- ;;
- itr.d dtr[r16]=r18
- ;;
- srlz.i
- ;;
- srlz.d
- ;;
-
- // re-pin mappings for stack (current)
- mov r25=IA64_GRANULE_SHIFT<<2
- dep r21=0,r13,60,4 // physical address of "current"
- ;;
- ptr.d r13,r25
- ;;
- srlz.d
- ;;
- or r23=r21,r26 // construct PA | page properties
- mov cr.itir=r25
- mov cr.ifa=r13 // VA of next task...
- mov r21=IA64_TR_CURRENT_STACK
- ;;
- itr.d dtr[r21]=r23 // wire in new mapping...
- ;;
- srlz.d
- ;;
-
- // Per-cpu
- cmp.eq p7,p0=r0,in1
-(p7) br.cond.sptk ia64_new_rr7_efi_percpu_not_mapped
- mov r24=PERCPU_PAGE_SHIFT<<2
- movl r22=PERCPU_ADDR
- ;;
- ptr.d r22,r24
- ;;
- srlz.d
- ;;
- or r23=loc2,r26
- mov cr.itir=r24
- mov cr.ifa=r22
- mov r25=IA64_TR_PERCPU_DATA
- ;;
- itr.d dtr[r25]=r23 // wire in new mapping...
- ;;
- srlz.d
- ;;
-ia64_new_rr7_efi_percpu_not_mapped:
-
- // VPD
- cmp.eq p7,p0=r0,in2
-(p7) br.cond.sptk ia64_new_rr7_efi_vpd_not_mapped
- or loc6 = r26,loc6 // construct PA | page properties
- mov r22=IA64_TR_VPD
- mov r24=IA64_TR_MAPPED_REGS
- mov r23=IA64_GRANULE_SHIFT<<2
- ;;
- ptr.i in2,r23
- ;;
-	ptr.d in2,r23		// purge size (r23), not the TR index in r24
- ;;
- srlz.i
- ;;
- srlz.d
- ;;
- mov cr.itir=r23
- mov cr.ifa=in2
- ;;
- itr.i itr[r22]=loc6
- ;;
- itr.d dtr[r24]=loc6
- ;;
- srlz.i
- ;;
- srlz.d
- ;;
-ia64_new_rr7_efi_vpd_not_mapped:
-
- // done, switch back to virtual and return
- mov r16=loc3 // r16= original psr
- br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
- mov psr.l = loc3 // restore init PSR
- ;;
-
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
- mov ar.rsc=loc4 // restore RSE configuration
-	srlz.d				// serialize restoration of psr.l
- br.ret.sptk.many rp
-END(ia64_new_rr7_efi)
-
-#if 0 /* Not used */
-#include "minstate.h"
-
-GLOBAL_ENTRY(ia64_prepare_handle_privop)
- .prologue
- /*
- * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
- */
- mov r16=r0
- DO_SAVE_SWITCH_STACK
- br.call.sptk.many rp=ia64_handle_privop // stack frame setup in ivt
-.ret22: .body
- DO_LOAD_SWITCH_STACK
- br.cond.sptk.many rp // goes to ia64_leave_kernel
-END(ia64_prepare_handle_privop)
-
-GLOBAL_ENTRY(ia64_prepare_handle_break)
- .prologue
- /*
- * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
- */
- mov r16=r0
- DO_SAVE_SWITCH_STACK
- br.call.sptk.many rp=ia64_handle_break // stack frame setup in ivt
-.ret23: .body
- DO_LOAD_SWITCH_STACK
- br.cond.sptk.many rp // goes to ia64_leave_kernel
-END(ia64_prepare_handle_break)
-
-GLOBAL_ENTRY(ia64_prepare_handle_reflection)
- .prologue
- /*
- * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
- */
- mov r16=r0
- DO_SAVE_SWITCH_STACK
- br.call.sptk.many rp=ia64_handle_reflection // stack frame setup in ivt
-.ret24: .body
- DO_LOAD_SWITCH_STACK
- br.cond.sptk.many rp // goes to ia64_leave_kernel
-END(ia64_prepare_handle_reflection)
-#endif
-
-GLOBAL_ENTRY(__get_domain_bundle)
- EX(.failure_in_get_bundle,ld8 r8=[r32],8)
- ;;
- EX(.failure_in_get_bundle,ld8 r9=[r32])
- ;;
- br.ret.sptk.many rp
- ;;
-.failure_in_get_bundle:
- mov r8=0
- ;;
- mov r9=0
- ;;
- br.ret.sptk.many rp
- ;;
-END(__get_domain_bundle)
-
-/* derived from linux/arch/ia64/hp/sim/boot/boot_head.S */
-GLOBAL_ENTRY(pal_emulator_static)
- mov r8=-1
- mov r9=256
- ;;
- cmp.gtu p7,p8=r9,r32 /* r32 <= 255? */
-(p7) br.cond.sptk.few static
- ;;
- mov r9=512
- ;;
- cmp.gtu p7,p8=r9,r32
-(p7) br.cond.sptk.few stacked
- ;;
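-	// PAL procedure indexes 0-255 use the static register calling
-	// convention and 256-511 the stacked convention, hence the two
-	// range checks above.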
-static: cmp.eq p7,p8=6,r32 /* PAL_PTCE_INFO */
-(p8) br.cond.sptk.few 1f
- ;;
- mov r8=0 /* status = 0 */
- movl r9=0x100000000 /* tc.base */
- movl r10=0x0000000200000003 /* count[0], count[1] */
- movl r11=0x1000000000002000 /* stride[0], stride[1] */
- br.ret.sptk.few rp
-1: cmp.eq p7,p8=14,r32 /* PAL_FREQ_RATIOS */
-(p8) br.cond.sptk.few 1f
- mov r8=0 /* status = 0 */
- movl r9 =0x900000002 /* proc_ratio (1/100) */
- movl r10=0x100000100 /* bus_ratio<<32 (1/256) */
- movl r11=0x900000002 /* itc_ratio<<32 (1/100) */
- ;;
-1: cmp.eq p7,p8=19,r32 /* PAL_RSE_INFO */
-(p8) br.cond.sptk.few 1f
- mov r8=0 /* status = 0 */
- mov r9=96 /* num phys stacked */
- mov r10=0 /* hints */
- mov r11=0
- br.ret.sptk.few rp
-1: cmp.eq p7,p8=1,r32 /* PAL_CACHE_FLUSH */
-(p8) br.cond.sptk.few 1f
-#if 0
- mov r9=ar.lc
-	movl r8=524288		/* flush 512k cache lines (16MB) */
- ;;
- mov ar.lc=r8
- movl r8=0xe000000000000000
- ;;
-.loop: fc r8
- add r8=32,r8
- br.cloop.sptk.few .loop
- sync.i
- ;;
- srlz.i
- ;;
- mov ar.lc=r9
- mov r8=r0
- ;;
-1: cmp.eq p7,p8=15,r32 /* PAL_PERF_MON_INFO */
-(p8) br.cond.sptk.few 1f
- mov r8=0 /* status = 0 */
- movl r9 =0x08122f04 /* generic=4 width=47 retired=8
- * cycles=18
- */
- mov r10=0 /* reserved */
- mov r11=0 /* reserved */
- mov r16=0xffff /* implemented PMC */
- mov r17=0x3ffff /* implemented PMD */
- add r18=8,r29 /* second index */
- ;;
- st8 [r29]=r16,16 /* store implemented PMC */
- st8 [r18]=r0,16 /* clear remaining bits */
- ;;
- st8 [r29]=r0,16 /* clear remaining bits */
- st8 [r18]=r0,16 /* clear remaining bits */
- ;;
- st8 [r29]=r17,16 /* store implemented PMD */
- st8 [r18]=r0,16 /* clear remaining bits */
- mov r16=0xf0 /* cycles count capable PMC */
- ;;
- st8 [r29]=r0,16 /* clear remaining bits */
- st8 [r18]=r0,16 /* clear remaining bits */
- mov r17=0xf0 /* retired bundles capable PMC */
- ;;
- st8 [r29]=r16,16 /* store cycles capable */
- st8 [r18]=r0,16 /* clear remaining bits */
- ;;
- st8 [r29]=r0,16 /* clear remaining bits */
- st8 [r18]=r0,16 /* clear remaining bits */
- ;;
- st8 [r29]=r17,16 /* store retired bundle capable */
- st8 [r18]=r0,16 /* clear remaining bits */
- ;;
- st8 [r29]=r0,16 /* clear remaining bits */
- st8 [r18]=r0,16 /* clear remaining bits */
- ;;
-1: br.cond.sptk.few rp
-#else
-1:
-#endif
-stacked:
- br.ret.sptk.few rp
-END(pal_emulator_static)
-
-// void ia64_copy_rbs(unsigned long* dst_bspstore, unsigned long* dst_rbs_size,
-// unsigned long* dst_rnat_p,
-// unsigned long* src_bsp, unsigned long src_rbs_size,
-// unsigned long src_rnat);
-// The caller must mask interrupts.
-// The caller must also ensure that src_rbs_size isn't larger than the
-// number of physical stacked registers; otherwise loadrs faults with an
-// Illegal Operation fault, resulting in a panic.
-//
-// r14 = r32 = dst_bspstore
-// r15 = r33 = dst_rbs_size_p
-// r16 = r34 = dst_rnat_p
-// r17 = r35 = src_bsp
-// r18 = r36 = src_rbs_size
-// r19 = r37 = src_rnat
-//
-// r20 = saved ar.rsc
-// r21 = saved ar.bspstore
-//
-// r22 = saved_ar_rnat
-// r23 = saved_ar_rp
-// r24 = saved_ar_pfs
-//
-// We save the values in these registers and store them into
-// [dst_rbs_size_p] and [dst_rnat_p] after the RSE operation is done.
-// r30 = return value of __ia64_copy_rbs to ia64_copy_rbs = dst_rbs_size
-// r31 = return value of __ia64_copy_rbs to ia64_copy_rbs = dst_rnat
-//
-#define dst_bspstore r14
-#define dst_rbs_size_p r15
-#define dst_rnat_p r16
-#define src_bsp r17
-#define src_rbs_size r18
-#define src_rnat r19
-
-#define saved_ar_rsc r20
-#define saved_ar_bspstore r21
-#define saved_ar_rnat r22
-#define saved_rp r23
-#define saved_ar_pfs r24
-
-#define dst_rbs_size r30
-#define dst_rnat r31
-ENTRY(__ia64_copy_rbs)
- .prologue
- .fframe 0
-
- // Here cfm.{sof, sol, sor, rrb}=0
- //
- // flush current register stack to backing store
-{
-	flushrs				// must be the first insn in its group
- srlz.i
-}
-
- // switch to enforced lazy mode
- mov saved_ar_rsc = ar.rsc
- ;;
- mov ar.rsc = 0
- ;;
-
- .save ar.bspstore, saved_ar_bspstore
- mov saved_ar_bspstore = ar.bspstore
- .save ar.rnat, saved_ar_rnat
- mov saved_ar_rnat = ar.rnat
- ;;
-
- .body
- // load from src
- mov ar.bspstore = src_bsp
- ;;
- mov ar.rnat = src_rnat
- shl src_rbs_size = src_rbs_size,16
- ;;
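-	// loadrs takes its byte count from the ar.rsc.loadrs field
-	// (bits 29:16), hence the shift left by 16 above.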
- mov ar.rsc = src_rbs_size
- ;;
-{
-	loadrs				// must be the first insn in its group
- ;;
-}
-
- // flush to dst
- mov ar.bspstore = dst_bspstore
- ;;
-{
-	flushrs				// must be the first insn in its group
- srlz.i
-}
- ;;
- mov dst_rbs_size = ar.bsp
- mov dst_rnat = ar.rnat
- ;;
- sub dst_rbs_size = dst_rbs_size, dst_bspstore
-
- // switch back to the original backing store
- .restorereg ar.bspstore
- mov ar.bspstore = saved_ar_bspstore
- ;;
- .restorereg ar.rnat
- mov ar.rnat = saved_ar_rnat
- ;;
- // restore rsc
- mov ar.rsc = saved_ar_rsc
-
- ;;
- br.ret.sptk.many rp
-END(__ia64_copy_rbs)
-
-GLOBAL_ENTRY(ia64_copy_rbs)
- .prologue
- .fframe 0
- .save ar.pfs, saved_ar_pfs
- alloc saved_ar_pfs = ar.pfs, 6, 0, 0, 0
- .save.b 0x1, saved_rp
- mov saved_rp = rp
-
- .body
-	// We play with the register backing store, so we can't use
-	// stacked registers.
-	// Save in0-in5 to static scratch registers.
- mov dst_bspstore = r32
- mov dst_rbs_size_p = r33
- mov dst_rnat_p = r34
- mov src_bsp = r35
- mov src_rbs_size = r36
- mov src_rnat = r37
- ;;
-	// Set cfm.{sof, sol, sor, rrb}=0, by calling void
-	// __ia64_copy_rbs(void), to avoid nasty stacked-register issues
-	// related to cover.  cfm.{sof, sol, sor, rrb}=0 makes things easy.
- br.call.sptk.many rp = __ia64_copy_rbs
-
- st8 [dst_rbs_size_p] = dst_rbs_size
- st8 [dst_rnat_p] = dst_rnat
-
- .restorereg ar.pfs
- mov ar.pfs = saved_ar_pfs
- .restorereg rp
- mov rp = saved_rp
- ;;
- br.ret.sptk.many rp
-END(ia64_copy_rbs)
diff --git a/xen/arch/ia64/xen/xenmem.c b/xen/arch/ia64/xen/xenmem.c
deleted file mode 100644
index 6ca892fdd5..0000000000
--- a/xen/arch/ia64/xen/xenmem.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Xen memory allocator routines
- *
- * Copyright (C) 2005 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- * Copyright (C) 2005 Intel Corp.
- *
- * Routines used by ia64 machines with contiguous (or virtually contiguous)
- * memory.
- */
-
-#include <linux/config.h>
-#include <asm/pgtable.h>
-#include <xen/mm.h>
-
-#ifdef CONFIG_VIRTUAL_FRAME_TABLE
-#include <linux/efi.h>
-#include <asm/pgalloc.h>
-
-#define FRAMETABLE_PGD_OFFSET(ADDR) \
- (frametable_pg_dir + (((ADDR) >> PGDIR_SHIFT) & \
- ((1UL << (PAGE_SHIFT - 3)) - 1)))
-
-#define FRAMETABLE_PMD_OFFSET(PGD, ADDR) \
- __va((unsigned long *)(PGD) + (((ADDR) >> PMD_SHIFT) & \
- ((1UL << (PAGE_SHIFT - 3)) - 1)))
-
-#define FRAMETABLE_PTE_OFFSET(PMD, ADDR) \
- (pte_t *)__va((unsigned long *)(PMD) + (((ADDR) >> PAGE_SHIFT) & \
- ((1UL << (PAGE_SHIFT - 3)) - 1)))
-
-static unsigned long table_size;
-static bool_t __read_mostly opt_contig_mem;
-boolean_param("contig_mem", opt_contig_mem);
-#else
-#define opt_contig_mem 1
-#endif
-
-struct page_info *frame_table __read_mostly;
-unsigned long max_page;
-
-/*
- * Set up the page tables.
- */
-volatile unsigned long *mpt_table __read_mostly;
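-/* mpt_table is the machine-to-physical table: one entry per machine page
- * frame, mapping mfn -> guest pfn.  Entries for unmapped frames hold
- * INVALID_M2P_ENTRY (see paging_init() below). */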
-
-void __init
-paging_init (void)
-{
- unsigned int mpt_order;
- unsigned long mpt_table_size;
- struct page_info *page;
- unsigned long i;
-
- if (!opt_contig_mem) {
- /* mpt_table is already allocated at this point. */
- return;
- }
-
-	/* Create the machine-to-physical mapping table.
-	 * NOTE: as with the frame table, we may later need a virtually
-	 * mapped mpt table if large holes exist.  Also MAX_ORDER needs
-	 * to be changed in common code, which only supports 16M so far.
-	 */
- mpt_table_size = max_page * sizeof(unsigned long);
- mpt_order = get_order(mpt_table_size);
- ASSERT(mpt_order <= MAX_ORDER);
- page = alloc_domheap_pages(NULL, mpt_order, 0);
- if (page == NULL)
- panic("Not enough memory to bootstrap Xen.\n");
-
- mpt_table = page_to_virt(page);
- printk("machine to physical table: 0x%lx mpt_table_size 0x%lx\n"
- "mpt_order %u max_page 0x%lx\n",
- (u64)mpt_table, mpt_table_size, mpt_order, max_page);
- for (i = 0;
- i < ((1UL << mpt_order) << PAGE_SHIFT) / sizeof(mpt_table[0]);
- i++) {
- mpt_table[i] = INVALID_M2P_ENTRY;
- }
-}
-
-#ifdef CONFIG_VIRTUAL_FRAME_TABLE
-
-static unsigned long __init
-alloc_dir_page(void)
-{
- unsigned long mfn = alloc_boot_pages(1, 1);
- unsigned long dir;
- ++table_size;
- dir = mfn << PAGE_SHIFT;
- clear_page(__va(dir));
- return dir;
-}
-
-static inline unsigned long __init
-alloc_table_page(unsigned long fill)
-{
- unsigned long mfn = alloc_boot_pages(1, 1);
- unsigned long *table;
- unsigned long i;
- ++table_size;
- table = (unsigned long *)__va((mfn << PAGE_SHIFT));
- for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
- table[i] = fill;
- return mfn;
-}
-
-static void __init
-create_page_table(unsigned long start_page, unsigned long end_page,
- unsigned long fill)
-{
- unsigned long address;
- unsigned long *dir;
- pte_t *pteptr;
-
- for (address = start_page; address < end_page; address += PAGE_SIZE) {
- dir = FRAMETABLE_PGD_OFFSET(address);
- if (!*dir)
- *dir = alloc_dir_page();
- dir = FRAMETABLE_PMD_OFFSET(*dir, address);
- if (!*dir)
- *dir = alloc_dir_page();
- pteptr = FRAMETABLE_PTE_OFFSET(*dir, address);
- if (pte_none(*pteptr))
- set_pte(pteptr, pfn_pte(alloc_table_page(fill),
- PAGE_KERNEL));
- }
-}
-
-static int __init
-create_frametable_page_table (u64 start, u64 end, void *arg)
-{
- struct page_info *map_start, *map_end;
- unsigned long start_page, end_page;
-
- map_start = frame_table + (__pa(start) >> PAGE_SHIFT);
- map_end = frame_table + (__pa(end) >> PAGE_SHIFT);
-
- start_page = (unsigned long) map_start & PAGE_MASK;
- end_page = PAGE_ALIGN((unsigned long) map_end);
-
- create_page_table(start_page, end_page, 0L);
- return 0;
-}
-
-static int __init
-create_mpttable_page_table (u64 start, u64 end, void *arg)
-{
- unsigned long map_start, map_end;
- unsigned long start_page, end_page;
-
- map_start = (unsigned long)(mpt_table + (__pa(start) >> PAGE_SHIFT));
- map_end = (unsigned long)(mpt_table + (__pa(end) >> PAGE_SHIFT));
-
- start_page = map_start & PAGE_MASK;
- end_page = PAGE_ALIGN(map_end);
-
- create_page_table(start_page, end_page, INVALID_M2P_ENTRY);
- return 0;
-}
-
-void __init init_virtual_frametable(void)
-{
- /* Allocate virtual frame_table */
- frame_table = (struct page_info *) VIRT_FRAME_TABLE_ADDR;
- table_size = 0;
- efi_memmap_walk(create_frametable_page_table, NULL);
-
- printk("size of virtual frame_table: %lukB\n",
- ((table_size << PAGE_SHIFT) >> 10));
-
- /* Allocate virtual mpt_table */
- table_size = 0;
- mpt_table = (unsigned long *)VIRT_FRAME_TABLE_END - max_page;
- efi_memmap_walk(create_mpttable_page_table, NULL);
-
- printk("virtual machine to physical table: %p size: %lukB\n"
- "max_page: 0x%lx\n",
- mpt_table, ((table_size << PAGE_SHIFT) >> 10), max_page);
-
- /*
-	 * XXX workaround for translate_domain_pte().
-	 * It returns mfn=0 when the machine page isn't present.  This
-	 * behavior is a workaround for memory-mapped I/O where no device
-	 * is assigned.  Xen might access the page_info of mfn=0, so it must
-	 * be guaranteed to exist; otherwise xen panics with a tlb miss
-	 * fault in xen's virtual address area.
- *
- * Once translate_domain_pte() is fixed correctly, this will
- * be removed.
- */
- if (!mfn_valid(0)) {
- printk("allocating frame table/mpt table at mfn 0.\n");
- create_frametable_page_table(0, PAGE_SIZE, NULL);
- create_mpttable_page_table(0, PAGE_SIZE, NULL);
- }
-}
-
-int
-ia64_mfn_valid (unsigned long pfn)
-{
- extern long ia64_frametable_probe(unsigned long);
- struct page_info *pg;
- int valid;
-
- if (opt_contig_mem)
- return 1;
- pg = mfn_to_page(pfn);
- valid = ia64_frametable_probe((unsigned long)pg);
-	/* also check the last byte of the page_info struct */
- if (valid)
- valid = ia64_frametable_probe((unsigned long)(pg+1)-1);
- return valid;
-}
-
-EXPORT_SYMBOL(ia64_mfn_valid);
-
-#endif /* CONFIG_VIRTUAL_FRAME_TABLE */
-
-/* FIXME: postpone support for machines with big holes between physical
- * memories.  The current hack only allows efi memdescs placed below 4G.
- * (See efi.c)
- */
-#define FT_ALIGN_SIZE (16UL << 20)
-void __init init_frametable(void)
-{
- unsigned long pfn;
- unsigned long frame_table_size;
-
-#ifdef CONFIG_VIRTUAL_FRAME_TABLE
- if (!opt_contig_mem) {
- init_virtual_frametable();
- return;
- }
-#endif
-
- frame_table_size = max_page * sizeof(struct page_info);
- frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
-
-	/* Request a contiguous chunk from the boot allocator, since the HV
-	 * address space is identity mapped */
- pfn = alloc_boot_pages(
- frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
-
- frame_table = __va(pfn << PAGE_SHIFT);
- memset(frame_table, 0, frame_table_size);
- printk("size of frame_table: %lukB\n",
- frame_table_size >> 10);
-}
diff --git a/xen/arch/ia64/xen/xenmisc.c b/xen/arch/ia64/xen/xenmisc.c
deleted file mode 100644
index 302204d090..0000000000
--- a/xen/arch/ia64/xen/xenmisc.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Xen misc
- *
- * Functions/decls that are/may be needed to link with Xen because
- * of x86 dependencies
- *
- * Copyright (C) 2004 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- */
-
-#include <linux/config.h>
-#include <xen/sched.h>
-#include <linux/efi.h>
-#include <asm/processor.h>
-#include <xen/serial.h>
-#include <asm/io.h>
-#include <xen/softirq.h>
-#include <public/sched.h>
-#include <asm/vhpt.h>
-#include <asm/debugger.h>
-#include <asm/vmx.h>
-#include <asm/vmx_vcpu.h>
-#include <asm/vcpu.h>
-
-unsigned long loops_per_jiffy = (1<<12); // from linux/init/main.c
-
-/* FIXME: where should these declarations live? */
-extern void show_registers(struct pt_regs *regs);
-
-void hpsim_setup(char **x)
-{
-#ifdef CONFIG_SMP
- init_smp_config();
-#endif
-}
-
-struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }
-
-///////////////////////////////
-// from common/keyhandler.c
-///////////////////////////////
-void dump_pageframe_info(struct domain *d)
-{
- printk("dump_pageframe_info not implemented\n");
-}
-
-///////////////////////////////
-// called from arch/ia64/head.S
-///////////////////////////////
-
-void console_print(char *msg)
-{
- printk("console_print called, how did start_kernel return???\n");
-}
-
-////////////////////////////////////
-// called from unaligned.c
-////////////////////////////////////
-
-void die_if_kernel(char *str, struct pt_regs *regs, long err)
-{
- if (guest_mode(regs))
- return;
-
- printk("%s: %s %ld\n", __func__, str, err);
- debugtrace_dump();
- show_registers(regs);
- domain_crash_synchronous();
-}
-
-long
-ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
- unsigned long user_rbs_end, unsigned long addr, long *val)
-{
- printk("ia64_peek: called, not implemented\n");
- return 1;
-}
-
-long
-ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
- unsigned long user_rbs_end, unsigned long addr, long val)
-{
- printk("ia64_poke: called, not implemented\n");
- return 1;
-}
-
-void
-ia64_sync_fph (struct task_struct *task)
-{
- printk("ia64_sync_fph: called, not implemented\n");
-}
-
-void
-ia64_flush_fph (struct task_struct *task)
-{
- printk("ia64_flush_fph: called, not implemented\n");
-}
-
-////////////////////////////////////
-// called from irq_ia64.c:init_IRQ()
-// (because CONFIG_IA64_HP_SIM is specified)
-////////////////////////////////////
-void hpsim_irq_init(void) { }
-
-
-// accommodate linux extable.c
-//const struct exception_table_entry *
-void *search_module_extables(unsigned long addr) { return NULL; }
-void *__module_text_address(unsigned long addr) { return NULL; }
-void *module_text_address(unsigned long addr) { return NULL; }
-
-
-void arch_dump_domain_info(struct domain *d)
-{
-}
-
-void arch_dump_vcpu_info(struct vcpu *v)
-{
-}
-
-void audit_domains_key(unsigned char key)
-{
-}
-
-void panic_domain(struct pt_regs *regs, const char *fmt, ...)
-{
- va_list args;
- char buf[256];
- struct vcpu *v = current;
-
- printk("$$$$$ PANIC in domain %d (k6=0x%lx): ",
- v->domain->domain_id,
- __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
- va_start(args, fmt);
- (void)vsnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
-	printk("%s", buf);	/* don't use buf as a format string */
- if (regs)
- show_registers(regs);
- domain_crash_synchronous ();
-}
diff --git a/xen/arch/ia64/xen/xenpatch.c b/xen/arch/ia64/xen/xenpatch.c
deleted file mode 100644
index 4ecab74ebb..0000000000
--- a/xen/arch/ia64/xen/xenpatch.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/******************************************************************************
- * xenpatch.c
- * Copyright (c) 2006 Silicon Graphics Inc.
- * Jes Sorensen <jes@sgi.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Parts of this based on code from arch/ia64/kernel/patch.c
- */
-
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <xen/init.h>
-#include <asm/xensystem.h>
-#include <asm/intrinsics.h>
-
-/*
- * This was adapted from code written by Tony Luck:
- *
- * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
- * like this:
- *
- * 6 6 5 4 3 2 1
- * 3210987654321098765432109876543210987654321098765432109876543210
- * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
- *
- * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
- */
-static u64
-get_imm64 (u64 insn_addr)
-{
- u64 *p = (u64 *) (insn_addr & -16); /* mask out slot number */
-
- return ( (p[1] & 0x0800000000000000UL) << 4) | /*A*/
- ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
- ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
- ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
- ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
- ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
- ((p[1] & 0x000007f000000000UL) >> 36); /*G*/
-}
-
-/* Patch instruction with "val" where "mask" has 1 bits. */
-void
-ia64_patch (u64 insn_addr, u64 mask, u64 val)
-{
- u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
-#define insn_mask ((1UL << 41) - 1)
- unsigned long shift;
-
- b0 = b[0]; b1 = b[1];
- /* 5 bits of template, then 3 x 41-bit instructions */
- shift = 5 + 41 * (insn_addr % 16);
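-	/* The low 4 bits of insn_addr encode the slot number (0-2)
-	   within the 16-byte bundle. */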
- if (shift >= 64) {
- m1 = mask << (shift - 64);
- v1 = val << (shift - 64);
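-		/* The field lies entirely in the second word; b[0] is
-		   left untouched. */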
- } else {
- m0 = mask << shift; m1 = mask >> (64 - shift);
- v0 = val << shift; v1 = val >> (64 - shift);
- b[0] = (b0 & ~m0) | (v0 & m0);
- }
- b[1] = (b1 & ~m1) | (v1 & m1);
-}
-
-void
-ia64_patch_imm64 (u64 insn_addr, u64 val)
-{
-	/* The assembler may generate an offset pointing to either slot 1
-	   or slot 2 for a long (2-slot) instruction, which occupies slots 1
-	   and 2.  */
- insn_addr &= -16UL;
- ia64_patch(insn_addr + 2, 0x01fffefe000UL,
- (((val & 0x8000000000000000UL) >> 27) | /* bit 63 -> 36 */
- ((val & 0x0000000000200000UL) << 0) | /* bit 21 -> 21 */
- ((val & 0x00000000001f0000UL) << 6) | /* bit 16 -> 22 */
- ((val & 0x000000000000ff80UL) << 20) | /* bit 7 -> 27 */
- ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */));
- ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
-}
-
-/*
- * Add more patch points in separate functions as appropriate.
- */
-
-static void __init xen_patch_frametable_miss(u64 offset)
-{
-#ifdef CONFIG_VIRTUAL_FRAME_TABLE
- extern char frametable_miss;
- u64 addr, val;
-
- addr = (u64)&frametable_miss;
- val = get_imm64(addr) + offset;
- ia64_patch_imm64(addr, val);
- ia64_fc(addr);
-#endif
-}
-
-/*
- * We sometimes need to load the physical address of a kernel
- * object.  Often we can convert the virtual address to physical
- * at execution time, but sometimes (either for performance reasons
- * or during error recovery) we cannot do this.  Patch the marked
- * bundles to load the physical address.
- */
-void __init
-ia64_patch_vtop (unsigned long start, unsigned long end)
-{
- s32 *offp = (s32 *)start;
- u64 ip;
-
- while (offp < (s32 *)end) {
- ip = (u64)offp + *offp;
-
- /* replace virtual address with corresponding physical address */
- ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
- ia64_fc((void *)ip);
- ++offp;
- }
- ia64_sync_i();
- ia64_srlz_i();
-}
-
-void __init xen_patch_kernel(void)
-{
- extern unsigned long xen_pstart;
- unsigned long patch_offset;
-
- patch_offset = xen_pstart - (KERNEL_START - PAGE_OFFSET);
-
- printk("Xen patching physical address access by offset: "
- "0x%lx\n", patch_offset);
-
- xen_patch_frametable_miss(patch_offset);
-
- ia64_sync_i();
- ia64_srlz_i();
-}
diff --git a/xen/arch/ia64/xen/xensetup.c b/xen/arch/ia64/xen/xensetup.c
deleted file mode 100644
index 35d3fe2c8b..0000000000
--- a/xen/arch/ia64/xen/xensetup.c
+++ /dev/null
@@ -1,719 +0,0 @@
-/******************************************************************************
- * xensetup.c
- * Copyright (c) 2004-2005 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- */
-
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <xen/errno.h>
-#include <xen/multiboot.h>
-#include <xen/sched.h>
-#include <xen/mm.h>
-#include <xen/hypercall.h>
-#include <xen/gdbstub.h>
-#include <xen/version.h>
-#include <xen/console.h>
-#include <xen/domain.h>
-#include <xen/serial.h>
-#include <xen/trace.h>
-#include <xen/keyhandler.h>
-#include <xen/vga.h>
-#include <asm/meminit.h>
-#include <asm/page.h>
-#include <asm/setup.h>
-#include <asm/vhpt.h>
-#include <xen/string.h>
-#include <asm/vmx.h>
-#include <linux/efi.h>
-#include <asm/iosapic.h>
-#include <xen/softirq.h>
-#include <xen/rcupdate.h>
-#include <asm/sn/simulator.h>
-#include <asm/sal.h>
-#include <xen/cpu.h>
-
-unsigned long total_pages;
-
-char saved_command_line[COMMAND_LINE_SIZE];
-char __initdata dom0_command_line[COMMAND_LINE_SIZE];
-
-cpumask_t cpu_present_map;
-
-extern unsigned long domain0_ready;
-
-int find_max_pfn (unsigned long, unsigned long, void *);
-
-/* FIXME: which header should these declarations be in? */
-extern void early_setup_arch(char **);
-extern void late_setup_arch(char **);
-extern void hpsim_serial_init(void);
-extern void setup_per_cpu_areas(void);
-extern void mem_init(void);
-extern void init_IRQ(void);
-extern void trap_init(void);
-extern void xen_patch_kernel(void);
-
-/* nosmp: ignore secondary processors */
-static bool_t __initdata opt_nosmp;
-boolean_param("nosmp", opt_nosmp);
-
-/* maxcpus: maximum number of CPUs to activate */
-static unsigned int __initdata max_cpus = NR_CPUS;
-integer_param("maxcpus", max_cpus);
-
-/* xencons: toggle xenconsole input (and irq).
- Note: you have to disable 8250 serials in domains (to avoid use of the
- same resource). */
-static int __initdata opt_xencons = 1;
-integer_param("xencons", opt_xencons);
-
-/* xencons_poll: toggle non-legacy xencons UARTs to run in polling mode */
-static bool_t __initdata opt_xencons_poll;
-boolean_param("xencons_poll", opt_xencons_poll);
-
-#define XENHEAP_DEFAULT_SIZE KERNEL_TR_PAGE_SIZE
-#define XENHEAP_SIZE_MIN (16 * 1024 * 1024) /* 16MBytes */
-unsigned long xenheap_size = XENHEAP_DEFAULT_SIZE;
-unsigned long xen_pstart;
-
-static int __init
-xen_count_pages(u64 start, u64 end, void *arg)
-{
- unsigned long *count = arg;
-
-	/* FIXME: do we need to consider the difference between DMA-usable
-	 * memory and normal memory?  It seems the HV has no requirement to
-	 * operate DMA owned by Dom0. */
- *count += (end - start) >> PAGE_SHIFT;
- return 0;
-}
-
-/*
- * The IPF loader currently supports only one command line, shared by
- * both xen and the guest kernel.  This function pre-parses the mixed
- * command line and splits it into two parts.
- *
- * Users should separate the parameters with "--"; the strings after the
- * splitter are for the guest kernel.  A missing "--" means the whole
- * line belongs to the guest.  Example:
- * "com2=57600,8n1 console=com2 -- console=ttyS1 console=tty
- * root=/dev/sda3 ro"
- */
-static char null[4] = { 0 };
-
-void __init early_cmdline_parse(char **cmdline_p)
-{
- char *guest_cmd;
- static const char * const split = "--";
-
- if (*cmdline_p == NULL) {
- *cmdline_p = &null[0];
- saved_command_line[0] = '\0';
- dom0_command_line[0] = '\0';
- return;
- }
-
- guest_cmd = strstr(*cmdline_p, split);
-	/* If there is no splitter, the whole line is for the guest */
- if (guest_cmd == NULL) {
- guest_cmd = *cmdline_p;
- *cmdline_p = &null[0];
- } else {
- *guest_cmd = '\0'; /* Split boot parameters for xen and guest */
- guest_cmd += strlen(split);
- while (*guest_cmd == ' ') guest_cmd++;
- }
-
- strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
- strlcpy(dom0_command_line, guest_cmd, COMMAND_LINE_SIZE);
- return;
-}
-
-struct ns16550_defaults ns16550_com1 = {
- .data_bits = 8,
- .parity = 'n',
- .stop_bits = 1
-};
-
-unsigned int ns16550_com1_gsi;
-unsigned int ns16550_com1_polarity;
-unsigned int ns16550_com1_trigger;
-
-struct ns16550_defaults ns16550_com2 = {
- .data_bits = 8,
- .parity = 'n',
- .stop_bits = 1
-};
-
-/* efi_print: print efi table at boot */
-static bool_t __initdata opt_efi_print;
-boolean_param("efi_print", opt_efi_print);
-
-/* print EFI memory map: */
-static void __init
-efi_print(void)
-{
- void *efi_map_start, *efi_map_end;
- u64 efi_desc_size;
-
- efi_memory_desc_t *md;
- void *p;
- int i;
-
- if (!opt_efi_print)
- return;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
- md = p;
- printk("mem%02u: type=%2u, attr=0x%016lx, range=[0x%016lx-0x%016lx) "
- "(%luMB)\n", i, md->type, md->attribute, md->phys_addr,
- md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
- md->num_pages >> (20 - EFI_PAGE_SHIFT));
- }
-}
-
-/*
- * These functions are utility functions for getting and
- * testing memory descriptors for allocating the xenheap area.
- */
-static efi_memory_desc_t * __init
-efi_get_md (unsigned long phys_addr)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
- return md;
- }
- return 0;
-}
-
-static int __init
-is_xenheap_usable_memory(efi_memory_desc_t *md)
-{
- if (!(md->attribute & EFI_MEMORY_WB))
- return 0;
-
- switch (md->type) {
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_BOOT_SERVICES_CODE:
- case EFI_BOOT_SERVICES_DATA:
- case EFI_CONVENTIONAL_MEMORY:
- return 1;
- }
- return 0;
-}
-
-static inline int __init
-md_overlaps(const efi_memory_desc_t *md, unsigned long phys_addr)
-{
- return (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT));
-}
-
-static inline int __init
-md_overlap_with_boot_param(const efi_memory_desc_t *md)
-{
- return md_overlaps(md, __pa(ia64_boot_param)) ||
- md_overlaps(md, ia64_boot_param->efi_memmap) ||
- md_overlaps(md, ia64_boot_param->command_line);
-}
-
-#define MD_SIZE(md) (md->num_pages << EFI_PAGE_SHIFT)
-#define MD_END(md) ((md)->phys_addr + MD_SIZE(md))
-
-static unsigned long __init
-efi_get_max_addr (void)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- unsigned long max_addr = 0;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (is_xenheap_usable_memory(md) && MD_END(md) > max_addr)
- max_addr = MD_END(md);
- }
- return max_addr;
-}
-
-extern char __init_begin[], __init_end[];
-static void noinline init_done(void)
-{
- memset(__init_begin, 0, __init_end - __init_begin);
- flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
- init_xenheap_pages(__pa(__init_begin), __pa(__init_end));
- printk("Freed %ldkB init memory.\n",
- (long)(__init_end-__init_begin)>>10);
-
- startup_cpu_idle_loop();
-}
-
-struct xen_heap_desc {
- void* xen_heap_start;
- unsigned long xenheap_phys_end;
- efi_memory_desc_t* kern_md;
-};
-
-static int __init
-init_xenheap_mds(unsigned long start, unsigned long end, void *arg)
-{
- struct xen_heap_desc *desc = (struct xen_heap_desc*)arg;
- unsigned long md_end = __pa(desc->xen_heap_start);
- efi_memory_desc_t* md;
-
- start = __pa(start);
- end = __pa(end);
-
- for (md = efi_get_md(md_end);
- md != NULL && md->phys_addr < desc->xenheap_phys_end;
- md = efi_get_md(md_end)) {
- md_end = MD_END(md);
-
- if (md == desc->kern_md ||
- (md->type == EFI_LOADER_DATA && !md_overlap_with_boot_param(md)) ||
- ((md->attribute & EFI_MEMORY_WB) &&
- is_xenheap_usable_memory(md))) {
- unsigned long s = max(start, max(__pa(desc->xen_heap_start),
- md->phys_addr));
- unsigned long e = min(end, min(md_end, desc->xenheap_phys_end));
- init_boot_pages(s, e);
- }
- }
-
- return 0;
-}
-
-int running_on_sim;
-
-static int __init
-is_platform_hp_ski(void)
-{
- int i;
- long cpuid[6];
-
- for (i = 0; i < 5; ++i)
- cpuid[i] = ia64_get_cpuid(i);
-
- if ((cpuid[0] & 0xff) != 'H')
- return 0;
- if ((cpuid[3] & 0xff) != 0x4)
- return 0;
- if (((cpuid[3] >> 8) & 0xff) != 0x0)
- return 0;
- if (((cpuid[3] >> 16) & 0xff) != 0x0)
- return 0;
- if (((cpuid[3] >> 24) & 0x7) != 0x7)
- return 0;
-
- return 1;
-}
-
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
-static int __initdata dom0_vhpt_size_log2;
-integer_param("dom0_vhpt_size_log2", dom0_vhpt_size_log2);
-#endif
-unsigned long xen_fixed_mfn_start __read_mostly;
-unsigned long xen_fixed_mfn_end __read_mostly;
-
-void __init start_kernel(void)
-{
- char *cmdline;
- unsigned long nr_pages;
- unsigned long dom0_memory_start, dom0_memory_size;
- unsigned long dom0_initrd_start, dom0_initrd_size;
- unsigned long md_end, relo_start, relo_end, relo_size = 0;
- struct vcpu *dom0_vcpu0;
- efi_memory_desc_t *kern_md, *last_md, *md;
- unsigned long xenheap_phys_end;
- void *xen_heap_start;
- struct xen_heap_desc heap_desc;
-#ifdef CONFIG_SMP
- int i;
-#endif
-
- /* Be sure the struct shared_info size is <= XSI_SIZE. */
- BUILD_BUG_ON(sizeof(struct shared_info) > XSI_SIZE);
-
- /* Kernel may be relocated by EFI loader */
- xen_pstart = ia64_tpa(KERNEL_START);
-
- running_on_sim = is_platform_hp_ski();
-
- early_setup_arch(&cmdline);
-
- /* We initialise the serial devices very early so we can get debugging. */
- if (running_on_sim)
- hpsim_serial_init();
- else {
- ns16550_init(0, &ns16550_com1);
- ns16550_init(1, &ns16550_com2);
- }
-
-#ifdef CONFIG_VGA
- /* Plug in a default VGA mode */
- vga_console_info.video_type = XEN_VGATYPE_TEXT_MODE_3;
- vga_console_info.u.text_mode_3.font_height = 16; /* generic VGA? */
- vga_console_info.u.text_mode_3.cursor_x =
- ia64_boot_param->console_info.orig_x;
- vga_console_info.u.text_mode_3.cursor_y =
- ia64_boot_param->console_info.orig_y;
- vga_console_info.u.text_mode_3.rows =
- ia64_boot_param->console_info.num_rows;
- vga_console_info.u.text_mode_3.columns =
- ia64_boot_param->console_info.num_cols;
-#endif
-
- console_init_preirq();
-
- if (running_on_sim || ia64_boot_param->domain_start == 0 ||
- ia64_boot_param->domain_size == 0) {
- /* This is possible only with the old elilo, which does not support
- a vmm. Fix now, and continue without initrd. */
- printk ("Your elilo is not Xen-aware. Bootparams fixed\n");
- ia64_boot_param->domain_start = ia64_boot_param->initrd_start;
- ia64_boot_param->domain_size = ia64_boot_param->initrd_size;
- ia64_boot_param->initrd_start = 0;
- ia64_boot_param->initrd_size = 0;
- }
-
- printk("Xen command line: %s\n", saved_command_line);
-
- /*
- * Test if the boot allocator bitmap will overflow xenheap_size. If
- * so, continue to bump it up until we have at least a minimum space
- * for the actual xenheap.
- */
- max_page = efi_get_max_addr() >> PAGE_SHIFT;
- while ((max_page >> 3) > xenheap_size - XENHEAP_SIZE_MIN)
- xenheap_size <<= 1;
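-    /* (max_page >> 3 is the size in bytes of the boot allocator
-     *  bitmap: one bit per page frame.) */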
-
- xenheap_phys_end = xen_pstart + xenheap_size;
- printk("xen image pstart: 0x%lx, xenheap pend: 0x%lx\n",
- xen_pstart, xenheap_phys_end);
-
- xen_patch_kernel();
-
- kern_md = md = efi_get_md(xen_pstart);
- md_end = __pa(ia64_imva(&_end));
- relo_start = xenheap_phys_end;
-
- /*
- * Scan through the memory descriptors after the kernel
- * image to make sure we have enough room for the xenheap
- * area, pushing out whatever may already be there.
- */
- while (relo_start + relo_size >= md_end) {
- md = efi_get_md(md_end);
-
- if (md == NULL) {
-            printk("no room to move loader data; skipping relocation\n");
- goto skip_move;
- }
-
- md_end = MD_END(md);
- if (relo_start < md->phys_addr)
- relo_start = md->phys_addr;
-
- if (!is_xenheap_usable_memory(md)) {
- /* Skip this area */
- if (md_end > relo_start)
- relo_start = md_end;
- continue;
- }
-
- /*
- * The dom0 kernel or initrd could overlap, reserve space
- * at the end to relocate them later.
- */
- if (md->type == EFI_LOADER_DATA) {
- /* Test for ranges we're not prepared to move */
- if (!md_overlap_with_boot_param(md))
- relo_size += MD_SIZE(md);
-
- /* If range overlaps the end, push out the relocation start */
- if (md_end > relo_start)
- relo_start = md_end;
- }
- }
- last_md = md;
- relo_start = md_end - relo_size;
- relo_end = relo_start + relo_size;
-
- md_end = __pa(ia64_imva(&_end));
-
- /*
- * Move any relocated data out into the previously found relocation
-     * area.  Any extra memory descriptors are moved out to the end
- * and set to zero pages.
- */
- for (md = efi_get_md(md_end) ;; md = efi_get_md(md_end)) {
- md_end = MD_END(md);
-
- if (md->type == EFI_LOADER_DATA && !md_overlap_with_boot_param(md)) {
- unsigned long relo_offset;
-
- if (md_overlaps(md, ia64_boot_param->domain_start)) {
- relo_offset = ia64_boot_param->domain_start - md->phys_addr;
- printk("Moving Dom0 kernel image: 0x%lx -> 0x%lx (%ld KiB)\n",
- ia64_boot_param->domain_start, relo_start + relo_offset,
- ia64_boot_param->domain_size >> 10);
- ia64_boot_param->domain_start = relo_start + relo_offset;
- }
- if (ia64_boot_param->initrd_size &&
- md_overlaps(md, ia64_boot_param->initrd_start)) {
- relo_offset = ia64_boot_param->initrd_start - md->phys_addr;
- printk("Moving Dom0 initrd image: 0x%lx -> 0x%lx (%ld KiB)\n",
- ia64_boot_param->initrd_start, relo_start + relo_offset,
- ia64_boot_param->initrd_size >> 10);
- ia64_boot_param->initrd_start = relo_start + relo_offset;
- }
- memcpy(__va(relo_start), __va(md->phys_addr), MD_SIZE(md));
- relo_start += MD_SIZE(md);
- }
-
- if (md == last_md)
- break;
- }
-
- /* Trim the last entry */
- md->num_pages -= (relo_size >> EFI_PAGE_SHIFT);
-
-skip_move:
- reserve_memory();
-
- /* first find highest page frame number */
- max_page = 0;
- efi_memmap_walk(find_max_pfn, &max_page);
-	printk("find_memory: efi_memmap_walk returns max_page=%lx\n", max_page);
- efi_print();
-
- xen_heap_start = memguard_init(ia64_imva(&_end));
- printk("xen_heap_start: %p\n", xen_heap_start);
-
- efi_memmap_walk(filter_rsvd_memory, init_boot_pages);
- efi_memmap_walk(xen_count_pages, &nr_pages);
-
- printk("System RAM: %luMB (%lukB)\n",
- nr_pages >> (20 - PAGE_SHIFT),
- nr_pages << (PAGE_SHIFT - 10));
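-	/*
-	 * Shift arithmetic above, worked through (assuming PAGE_SHIFT == 14,
-	 * i.e. 16 KiB pages): nr_pages >> 6 gives MiB and nr_pages << 4
-	 * gives KiB, since each page holds 2^14 bytes.
-	 */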
- total_pages = nr_pages;
-
- init_frametable();
-
- trap_init();
-
- /* process SAL system table */
- /* must be before any pal/sal call */
- BUG_ON(efi.sal_systab == EFI_INVALID_TABLE_ADDR);
- ia64_sal_init(__va(efi.sal_systab));
-
- /* early_setup_arch() maps PAL code. */
- identify_vmx_feature();
- /* If vmx feature is on, do necessary initialization for vmx */
- if (vmx_enabled)
- xen_heap_start = vmx_init_env(xen_heap_start, xenheap_phys_end);
-
-	/* Allocate memory for the per-cpu area.
-	 * per_cpu_init(), called from late_setup_arch(), runs after
-	 * end_boot_allocator(), so by then it is too late to allocate
-	 * memory in the xen virtual address area.
-	 */
- xen_heap_start = per_cpu_allocate(xen_heap_start, xenheap_phys_end);
-
- heap_desc.xen_heap_start = xen_heap_start;
- heap_desc.xenheap_phys_end = xenheap_phys_end;
- heap_desc.kern_md = kern_md;
- efi_memmap_walk(&init_xenheap_mds, &heap_desc);
-
- printk("Xen heap: %luMB (%lukB)\n",
- (xenheap_phys_end-__pa(xen_heap_start)) >> 20,
- (xenheap_phys_end-__pa(xen_heap_start)) >> 10);
-
- /* for is_xen_fixed_mfn() */
- xen_fixed_mfn_start = virt_to_mfn(&_start);
- xen_fixed_mfn_end = virt_to_mfn(xen_heap_start);
-
- end_boot_allocator();
-
- softirq_init();
- tasklet_subsys_init();
-
- late_setup_arch(&cmdline);
-
- timer_init();
- idle_vcpu[0] = (struct vcpu*) ia64_r13;
- scheduler_init();
-
- alloc_dom_xen_and_dom_io();
- setup_per_cpu_areas();
- mem_init();
-
- local_irq_disable();
- init_IRQ ();
- init_xen_time(); /* initialise the time */
-
- rcu_init();
-
-#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
- open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
-#endif
-
-#ifdef CONFIG_SMP
- if ( opt_nosmp )
- {
- max_cpus = 0;
- smp_num_siblings = 1;
- //boot_cpu_data.x86_num_cores = 1;
- }
-
- /* A vcpu is created for the idle domain on every physical cpu.
- Limit the number of cpus to the maximum number of vcpus. */
- if (max_cpus > MAX_VIRT_CPUS)
- max_cpus = MAX_VIRT_CPUS;
-
- smp_prepare_cpus(max_cpus);
-
- /* We aren't hotplug-capable yet. */
- cpumask_or(&cpu_present_map, &cpu_present_map, &cpu_possible_map);
-
- /* Enable IRQ to receive IPI (needed for ITC sync). */
- local_irq_enable();
-
- do_presmp_initcalls();
-
-	printk("num_online_cpus=%d, max_cpus=%d\n", num_online_cpus(), max_cpus);
- for_each_present_cpu ( i )
- {
- if ( num_online_cpus() >= max_cpus )
- break;
- if ( !cpu_online(i) )
- {
- int ret = cpu_up(i);
- if ( ret != 0 )
- printk("Failed to bring up CPU %u (error %d)\n", i, ret);
- }
- }
-
- local_irq_disable();
-
- printk("Brought up %ld CPUs\n", (long)num_online_cpus());
- smp_cpus_done();
-#endif
-
- iommu_setup(); /* setup iommu if available */
-
- do_initcalls();
- sort_main_extable();
-
- init_rid_allocator ();
-
- local_irq_enable();
-
- if (opt_xencons) {
- initialize_keytable();
- if (ns16550_com1_gsi) {
- if (opt_xencons_poll ||
- iosapic_register_intr(ns16550_com1_gsi,
- ns16550_com1_polarity,
- ns16550_com1_trigger) < 0) {
- ns16550_com1.irq = 0;
- ns16550_init(0, &ns16550_com1);
- }
- }
- console_init_postirq();
- }
-
- expose_p2m_init();
-
- /* Create initial domain 0. */
- dom0 = domain_create(0, 0, 0);
- if (dom0 == NULL)
- panic("Error creating domain 0\n");
- domain_set_vhpt_size(dom0, dom0_vhpt_size_log2);
- dom0_vcpu0 = alloc_dom0_vcpu0();
- if (dom0_vcpu0 == NULL || vcpu_late_initialise(dom0_vcpu0) != 0)
- panic("Cannot allocate dom0 vcpu 0\n");
-
- dom0->is_privileged = 1;
- dom0->target = NULL;
-
- /*
- * We're going to setup domain0 using the module(s) that we stashed safely
- * above our heap. The second module, if present, is an initrd ramdisk.
- */
- dom0_memory_start = (unsigned long) __va(ia64_boot_param->domain_start);
- dom0_memory_size = ia64_boot_param->domain_size;
- dom0_initrd_start = (unsigned long) __va(ia64_boot_param->initrd_start);
- dom0_initrd_size = ia64_boot_param->initrd_size;
-
- if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_size,
- dom0_initrd_start,dom0_initrd_size,
- 0) != 0)
- panic("Could not set up DOM0 guest OS\n");
-
- if (!running_on_sim && !IS_MEDUSA()) // slow on ski and pages are pre-initialized to zero
- scrub_heap_pages();
-
- init_trace_bufs();
-
- if (opt_xencons) {
- console_endboot();
- serial_endboot();
- }
-
- domain0_ready = 1;
-
- domain_unpause_by_systemcontroller(dom0);
-
- init_done();
-}
-
-void arch_get_xen_caps(xen_capabilities_info_t *info)
-{
- /* Interface name is always xen-3.0-* for Xen-3.x. */
- int major = 3, minor = 0;
- char s[32];
-
- (*info)[0] = '\0';
-
- snprintf(s, sizeof(s), "xen-%d.%d-ia64 ", major, minor);
- safe_strcat(*info, s);
-
- snprintf(s, sizeof(s), "xen-%d.%d-ia64be ", major, minor);
- safe_strcat(*info, s);
-
- if (vmx_enabled)
- {
- snprintf(s, sizeof(s), "hvm-%d.%d-ia64 ", major, minor);
- safe_strcat(*info, s);
-
- snprintf(s, sizeof(s), "hvm-%d.%d-ia64-sioemu ", major, minor);
- safe_strcat(*info, s);
- }
-}
-
-int __init xen_in_range(paddr_t start, paddr_t end)
-{
- paddr_t xs = __pa(&_start);
- paddr_t xe = __pa(&_end);
-
- return (start < xe) && (end > xs);
-}
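-
-/*
- * Note (illustrative, not in the original source): this is the standard
- * half-open interval-overlap test between [start, end) and the Xen image
- * [xs, xe); callers such as the VT-d dom0 1:1 mapping loop use it to
- * skip pages owned by Xen.
- */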
diff --git a/xen/arch/ia64/xen/xentime.c b/xen/arch/ia64/xen/xentime.c
deleted file mode 100644
index de83438250..0000000000
--- a/xen/arch/ia64/xen/xentime.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * xen/arch/ia64/time.c
- *
- * Copyright (C) 2005 Hewlett-Packard Co
- * Dan Magenheimer <dan.magenheimer@hp.com>
- */
-
-#include <linux/config.h>
-
-#include <linux/cpu.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/profile.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/interrupt.h>
-#include <linux/efi.h>
-#include <linux/profile.h>
-#include <linux/timex.h>
-
-#include <asm/machvec.h>
-#include <asm/delay.h>
-#include <asm/hw_irq.h>
-#include <asm/ptrace.h>
-#include <asm/sal.h>
-#include <asm/sections.h>
-#include <asm/system.h>
-#include <asm/vcpu.h>
-#include <linux/jiffies.h> // not included by xen/sched.h
-#include <xen/softirq.h>
-#include <xen/event.h>
-
-/* FIXME: where should these declarations live? */
-extern void ia64_init_itm(void);
-
-seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
-
-#define TIME_KEEPER_ID 0
-unsigned long domain0_ready = 0;
-static s_time_t stime_irq = 0x0; /* System time at last 'time update' */
-static unsigned long itc_scale __read_mostly, ns_scale __read_mostly;
-static unsigned long itc_at_irq;
-
-static u32 wc_sec, wc_nsec; /* UTC time at last 'time update'. */
-static void ia64_wallclock_set(void);
-
-/* We don't expect an absolute cycle value here, since there would be
- * no way to prevent overflow for a large numerator.  Normally this
- * conversion is used for relative offsets.
- */
-u64 cycle_to_ns(u64 cycle)
-{
- return (cycle * itc_scale) >> 32;
-}
-
-static u64 ns_to_cycle(u64 ns)
-{
- return (ns * ns_scale) >> 32;
-}
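-
-/*
- * Worked example (illustrative): init_xen_time() below computes
- * itc_scale = (10^9 << 32) / itc_freq.  For a 1 GHz ITC this is 1 << 32,
- * so cycle_to_ns(c) = (c * (1 << 32)) >> 32 = c: one cycle per
- * nanosecond, as expected.
- */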
-
-static inline u64 get_time_delta(void)
-{
- s64 delta_itc;
- u64 cur_itc;
-
- cur_itc = ia64_get_itc();
-
- delta_itc = (s64)(cur_itc - itc_at_irq);
-
- /* Ensure that the returned system time is monotonically increasing. */
- if ( unlikely(delta_itc < 0) ) delta_itc = 0;
- return cycle_to_ns(delta_itc);
-}
-
-
-s_time_t get_s_time(void)
-{
- s_time_t now;
- unsigned long seq;
-
- do {
- seq = read_seqbegin(&xtime_lock);
- now = stime_irq + get_time_delta();
- } while (unlikely(read_seqretry(&xtime_lock, seq)));
-
- return now;
-}
-
-void update_vcpu_system_time(struct vcpu *v)
-{
-	/* No-op here; let dom0 manage system time directly. */
- return;
-}
-
-void
-xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
-{
- unsigned long new_itm, old_itc;
-
- new_itm = local_cpu_data->itm_next;
- while (1) {
- if (smp_processor_id() == TIME_KEEPER_ID) {
- /*
- * Here we are in the timer irq handler. We have irqs locally
- * disabled, but we don't know if the timer_bh is running on
-			 * another CPU.  We need to avoid an SMP race by
-			 * acquiring xtime_lock.
- */
- write_seqlock(&xtime_lock);
- /* Updates system time (nanoseconds since boot). */
- old_itc = itc_at_irq;
- itc_at_irq = ia64_get_itc();
- stime_irq += cycle_to_ns(itc_at_irq - old_itc);
-
- write_sequnlock(&xtime_lock);
- }
-
- local_cpu_data->itm_next = new_itm;
-
- if (time_after(new_itm, ia64_get_itc()))
- break;
-
- new_itm += local_cpu_data->itm_delta;
- }
-
- if (!is_idle_domain(current->domain) && !VMX_DOMAIN(current)) {
- if (vcpu_timer_expired(current)) {
- vcpu_pend_timer(current);
- } else {
-			// Ensure another timer interrupt happens
-			// even if the domain doesn't set one.
- vcpu_set_next_timer(current);
- raise_softirq(TIMER_SOFTIRQ);
- return;
- }
- }
-
- do {
- /*
- * If we're too close to the next clock tick for
- * comfort, we increase the safety margin by
- * intentionally dropping the next tick(s). We do NOT
- * update itm.next because that would force us to call
- * do_timer() which in turn would let our clock run
- * too fast (with the potentially devastating effect
-		 * of losing the monotonicity of time).
- */
- while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
- new_itm += local_cpu_data->itm_delta;
- ia64_set_itm(new_itm);
- /* double check, in case we got hit by a (slow) PMI: */
- } while (time_after_eq(ia64_get_itc(), new_itm));
- raise_softirq(TIMER_SOFTIRQ);
-}
-
-static struct irqaction __read_mostly xen_timer_irqaction = {
- .handler = (void *) xen_timer_interrupt,
- .name = "timer"
-};
-
-void __init
-ia64_time_init (void)
-{
- register_percpu_irq(IA64_TIMER_VECTOR, &xen_timer_irqaction);
- ia64_init_itm();
-}
-
-/* wallclock set from efi.get_time */
-static void ia64_wallclock_set(void)
-{
- efi_time_t tv;
- efi_time_cap_t tc;
- efi_status_t status = 0;
-
- status = (*efi.get_time)(&tv, &tc);
- if (status != 0) {
- wc_sec = 0; wc_nsec = 0;
- printk("EFIRTC Get Time failed\n");
- return;
- }
-
- wc_sec = mktime(tv.year, tv.month, tv.day, tv.hour, tv.minute, tv.second);
- wc_nsec = tv.nanosecond;
- if (tv.timezone != EFI_UNSPECIFIED_TIMEZONE) {
- wc_sec -= tv.timezone * 60;
-		printk("Time zone offset from UTC is %d minutes\n", tv.timezone);
- } else {
-		printk("Time zone is not specified by EFIRTC\n");
- }
-}
-
-/* Late init function (after all CPUs are booted). */
-int __init init_xen_time()
-{
- ia64_time_init();
- ia64_wallclock_set();
-	itc_scale  = 1000000000UL << 32;
- itc_scale /= local_cpu_data->itc_freq;
- ns_scale = (local_cpu_data->itc_freq << 32) / 1000000000UL;
-
- /* System time ticks from zero. */
- stime_irq = (s_time_t)0;
- itc_at_irq = ia64_get_itc();
-
- printk("Time init:\n");
- printk(".... System Time: %ldns\n", NOW());
- printk(".... scale: %16lX\n", itc_scale);
-
- return 0;
-}
-
-int reprogram_timer(s_time_t timeout)
-{
- struct vcpu *v = current;
- s_time_t expire;
- unsigned long seq, cur_itc, itm_next;
-
- if (!domain0_ready || timeout == 0) return 1;
-
- do {
- seq = read_seqbegin(&xtime_lock);
- if ((expire = timeout - NOW()) < 0)
- return 0;
-
- cur_itc = ia64_get_itc();
- itm_next = cur_itc + ns_to_cycle(expire);
- } while (unlikely(read_seqretry(&xtime_lock, seq)));
-
- local_cpu_data->itm_next = itm_next;
- vcpu_set_next_timer(v);
- return 1;
-}
-
-void send_timer_event(struct vcpu *v)
-{
- send_guest_vcpu_virq(v, VIRQ_TIMER);
-}
-
-/* This is taken from xen/arch/x86/time.c, with the literal
- * 1000000000ull replaced by NSEC_PER_SEC.
- */
-struct tm wallclock_time(void)
-{
- uint64_t seconds;
-
- if (!wc_sec)
- return (struct tm) { 0 };
-
- seconds = NOW() + (wc_sec * NSEC_PER_SEC) + wc_nsec;
- do_div(seconds, NSEC_PER_SEC);
- return gmtime(seconds);
-}
-
-void get_wallclock(uint64_t *sec, uint64_t *nsec, uint64_t *now)
-{
- uint64_t n = NOW();
- uint64_t nano = n + wc_nsec;
- *sec = wc_sec + nano / NSEC_PER_SEC;
- *nsec = nano % NSEC_PER_SEC;
- *now = n;
-}
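-
-/*
- * Worked example for get_wallclock() (illustrative numbers): with
- * wc_sec = 1000, wc_nsec = 900000000 and NOW() = 300000000 ns,
- * nano = 1200000000, so *sec = 1001 and *nsec = 200000000 -- the
- * nanosecond carry propagates into the seconds result.
- */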
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 4c7fdd41db..9eba8bc2e0 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -58,7 +58,6 @@ subdir-$(CONFIG_COMPAT) += compat
subdir-$(x86_32) += hvm
subdir-$(x86_64) += hvm
-subdir-$(ia64) += hvm
subdir-y += libelf
subdir-$(HAS_DEVICE_TREE) += libfdt
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index e6706df53b..04c70388ca 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1514,9 +1514,7 @@ gnttab_transfer(
goto copyback;
}
-#ifndef __ia64__ /* IA64 implicitly replaces the old page in steal_page(). */
guest_physmap_remove_page(d, gop.mfn, mfn, 0);
-#endif
flush_tlb_mask(d->domain_dirty_cpumask);
/* Find the target domain. */
diff --git a/xen/common/kexec.c b/xen/common/kexec.c
index 71a3995b82..444cae1b03 100644
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -721,11 +721,7 @@ static void crash_save_vmcoreinfo(void)
VMCOREINFO_STRUCT_SIZE(domain);
VMCOREINFO_OFFSET(page_info, count_info);
-#ifdef __ia64__
- VMCOREINFO_OFFSET_SUB(page_info, u.inuse, _domain);
-#else
VMCOREINFO_OFFSET_SUB(page_info, v.inuse, _domain);
-#endif
VMCOREINFO_OFFSET(domain, domain_id);
VMCOREINFO_OFFSET(domain, next_in_list);
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 3f02380fdf..f0d396109b 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -23,9 +23,7 @@
#include <xen/tmem_xen.h>
#include <asm/current.h>
#include <asm/hardirq.h>
-#ifndef __ia64__
#include <asm/p2m.h>
-#endif
#include <xen/numa.h>
#include <public/memory.h>
#include <xsm/xsm.h>
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 75d1eeae98..1d7359db29 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1141,7 +1141,7 @@ void __init scrub_heap_pages(void)
* XEN-HEAP SUB-ALLOCATOR
*/
-#if !defined(__x86_64__) && !defined(__ia64__)
+#if !defined(__x86_64__)
void init_xenheap_pages(paddr_t ps, paddr_t pe)
{
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 37e82896f5..84b5bb27d6 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -88,7 +88,7 @@ void tmh_copy_page(char *to, char*from)
#endif
}
-#if defined(__ia64__) || defined (CONFIG_ARM)
+#if defined(CONFIG_ARM)
static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
pfp_t **pcli_pfp, bool_t cli_write)
{
diff --git a/xen/drivers/cpufreq/cpufreq.c b/xen/drivers/cpufreq/cpufreq.c
index b1be8c18ca..a15f6b50c3 100644
--- a/xen/drivers/cpufreq/cpufreq.c
+++ b/xen/drivers/cpufreq/cpufreq.c
@@ -442,16 +442,6 @@ int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *dom0_px_in
goto out;
}
-#ifdef CONFIG_IA64
- /* for IA64, currently it only supports FFH */
- if (dom0_px_info->control_register.space_id !=
- ACPI_ADR_SPACE_FIXED_HARDWARE)
- {
- ret = -EINVAL;
- goto out;
- }
-#endif
-
memcpy ((void *)&pxpt->control_register,
(void *)&dom0_px_info->control_register,
sizeof(struct xen_pct_register));
@@ -493,7 +483,6 @@ int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *dom0_px_in
{
#ifdef CONFIG_X86
/* for X86, check domain coordination */
- /* for IA64, _PSD is optional for current IA64 cpufreq algorithm */
if (dom0_px_info->shared_type != CPUFREQ_SHARED_TYPE_ALL &&
dom0_px_info->shared_type != CPUFREQ_SHARED_TYPE_ANY &&
dom0_px_info->shared_type != CPUFREQ_SHARED_TYPE_HW)
diff --git a/xen/drivers/passthrough/Makefile b/xen/drivers/passthrough/Makefile
index c965db36e8..7c40fa52dc 100644
--- a/xen/drivers/passthrough/Makefile
+++ b/xen/drivers/passthrough/Makefile
@@ -1,5 +1,4 @@
subdir-$(x86) += vtd
-subdir-$(ia64) += vtd
subdir-$(x86) += amd
subdir-$(x86_64) += x86
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index f83b860eb2..b4dc641eae 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -419,7 +419,6 @@ int hvm_do_IRQ_dpci(struct domain *d, struct pirq *pirq)
return 1;
}
-#ifdef SUPPORT_MSI_REMAPPING
/* called with d->event_lock held */
static void __msi_pirq_eoi(struct hvm_pirq_dpci *pirq_dpci)
{
@@ -479,7 +478,6 @@ static int hvm_pci_msi_assert(struct domain *d,
? send_guest_pirq(d, pirq)
: vmsi_deliver_pirq(d, pirq_dpci));
}
-#endif
static int _hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci,
void *arg)
@@ -489,13 +487,12 @@ static int _hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci,
if ( test_and_clear_bool(pirq_dpci->masked) )
{
-#ifdef SUPPORT_MSI_REMAPPING
if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
{
hvm_pci_msi_assert(d, pirq_dpci);
return 0;
}
-#endif
+
list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
{
struct pirq *info = dpci_pirq(pirq_dpci);
@@ -508,13 +505,11 @@ static int _hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci,
hvm_pci_intx_assert(d, device, intx);
pirq_dpci->pending++;
-#ifdef SUPPORT_MSI_REMAPPING
if ( pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE )
{
/* for translated MSI to INTx interrupt, eoi as early as possible */
__msi_pirq_eoi(pirq_dpci);
}
-#endif
}
/*
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 0a3e2df080..8eba541d1a 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -605,17 +605,6 @@ int iommu_do_domctl(
bus = (domctl->u.assign_device.machine_sbdf >> 8) & 0xff;
devfn = domctl->u.assign_device.machine_sbdf & 0xff;
-#ifdef __ia64__ /* XXX Is this really needed? */
- if ( device_assigned(seg, bus, devfn) )
- {
- printk(XENLOG_G_ERR "XEN_DOMCTL_assign_device: "
- "%04x:%02x:%02x.%u already assigned, or non-existent\n",
- seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- ret = -EINVAL;
- goto assign_device_out;
- }
-#endif
-
ret = assign_device(d, seg, bus, devfn);
if ( ret )
printk(XENLOG_G_ERR "XEN_DOMCTL_assign_device: "
@@ -644,14 +633,6 @@ int iommu_do_domctl(
bus = (domctl->u.assign_device.machine_sbdf >> 8) & 0xff;
devfn = domctl->u.assign_device.machine_sbdf & 0xff;
-#ifdef __ia64__ /* XXX Is this really needed? */
- if ( !device_assigned(seg, bus, devfn) )
- {
- ret = -EINVAL;
- goto deassign_device_out;
- }
-#endif
-
spin_lock(&pcidevs_lock);
ret = deassign_device(d, seg, bus, devfn);
spin_unlock(&pcidevs_lock);
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 50d337a0cd..90749cea3b 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -696,7 +696,6 @@ void __init setup_dom0_pci_devices(
spin_unlock(&pcidevs_lock);
}
-#ifdef SUPPORT_MSI_REMAPPING
static int _dump_pci_devices(struct pci_seg *pseg, void *arg)
{
struct pci_dev *pdev;
@@ -738,8 +737,6 @@ static int __init setup_dump_pcidevs(void)
return 0;
}
__initcall(setup_dump_pcidevs);
-#endif
-
/*
* Local variables:
diff --git a/xen/drivers/passthrough/vtd/Makefile b/xen/drivers/passthrough/vtd/Makefile
index 5b08912694..56d0153945 100644
--- a/xen/drivers/passthrough/vtd/Makefile
+++ b/xen/drivers/passthrough/vtd/Makefile
@@ -1,5 +1,4 @@
subdir-$(x86) += x86
-subdir-$(ia64) += ia64
obj-y += iommu.o
obj-y += dmar.o
diff --git a/xen/drivers/passthrough/vtd/ia64/Makefile b/xen/drivers/passthrough/vtd/ia64/Makefile
deleted file mode 100644
index 85243e3aa7..0000000000
--- a/xen/drivers/passthrough/vtd/ia64/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += vtd.o
diff --git a/xen/drivers/passthrough/vtd/ia64/vtd.c b/xen/drivers/passthrough/vtd/ia64/vtd.c
deleted file mode 100644
index 10963c0358..0000000000
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2008, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) Allen Kay <allen.m.kay@intel.com>
- * Copyright (C) Weidong Han <weidong.han@intel.com>
- */
-
-#include <xen/sched.h>
-#include <xen/softirq.h>
-#include <xen/domain_page.h>
-#include <xen/iommu.h>
-#include <xen/numa.h>
-#include <asm/xensystem.h>
-#include <asm/sal.h>
-#include "../iommu.h"
-#include "../dmar.h"
-#include "../extern.h"
-#include "../vtd.h"
-
-
-int vector_irq[NR_VECTORS] __read_mostly = {
- [0 ... NR_VECTORS - 1] = FREE_TO_ASSIGN_IRQ
-};
-/* irq_vector is indexed by the sum of all RTEs in all I/O APICs. */
-u8 irq_vector[NR_IRQS] __read_mostly;
-
-void *map_vtd_domain_page(u64 maddr)
-{
- return (void *)((u64)map_domain_page(maddr >> PAGE_SHIFT) |
- (maddr & (PAGE_SIZE - PAGE_SIZE_4K)));
-}
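-
-/*
- * Note on the mask above (illustrative): with 16 KiB Xen pages and 4 KiB
- * VT-d pages, PAGE_SIZE - PAGE_SIZE_4K == 0x3000, which preserves the
- * position of the 4 KiB-aligned table within the 16 KiB Xen page mapped
- * by map_domain_page().
- */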
-
-void unmap_vtd_domain_page(void *va)
-{
- unmap_domain_page(va);
-}
-
-unsigned int get_cache_line_size(void)
-{
- return L1_CACHE_BYTES;
-}
-
-void cacheline_flush(char * addr)
-{
- ia64_fc(addr);
- ia64_sync_i();
- ia64_srlz_i();
-}
-
-void flush_all_cache(void)
-{
- ia64_sal_cache_flush(3);
-}
-
-void *__init map_to_nocache_virt(int nr_iommus, u64 maddr)
-{
- return (void *) ( maddr + __IA64_UNCACHED_OFFSET);
-}
-
-void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
-{
- /* dummy */
-}
-
-static int __init do_dom0_iommu_mapping(unsigned long start,
- unsigned long end, void *arg)
-{
- unsigned long tmp, pfn, j, page_addr = start;
- struct domain *d = (struct domain *)arg;
-
- extern int xen_in_range(paddr_t start, paddr_t end);
-    /* Set up a 1:1 page table for dom0 covering all RAM except Xen bits. */
-
- while (page_addr < end)
- {
-        if (xen_in_range(page_addr, page_addr + PAGE_SIZE))
-        {
-            /* Still advance past Xen-owned pages, or this loop would
-               never terminate. */
-            page_addr += PAGE_SIZE;
-            continue;
-        }
-
- pfn = page_addr >> PAGE_SHIFT;
- tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
- for ( j = 0; j < tmp; j++ )
- iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j),
- IOMMUF_readable|IOMMUF_writable);
-
- page_addr += PAGE_SIZE;
-
- if (!(pfn & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
- process_pending_softirqs();
- }
- return 0;
-}
-
-void __init iommu_set_dom0_mapping(struct domain *d)
-{
- if (dom0)
- BUG_ON(d != dom0);
- efi_memmap_walk(do_dom0_iommu_mapping, d);
-}
diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
index 3eddbdf85f..5fe4246568 100644
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -31,26 +31,7 @@
#include "vtd.h"
#include "extern.h"
-#ifdef __ia64__
-#define nr_ioapics iosapic_get_nr_iosapics()
-#define nr_ioapic_entries(i) iosapic_get_nr_pins(i)
-#define __io_apic_read(apic, reg) \
- (*IO_APIC_BASE(apic) = reg, *(IO_APIC_BASE(apic)+4))
-#define __io_apic_write(apic, reg, val) \
- (*IO_APIC_BASE(apic) = reg, *(IO_APIC_BASE(apic)+4) = (val))
-#define __ioapic_read_entry(apic, pin, raw) ({ \
- struct IO_xAPIC_route_entry _e_; \
- ASSERT(raw); \
- ((u32 *)&_e_)[0] = __io_apic_read(apic, 0x10 + 2 * (pin)); \
- ((u32 *)&_e_)[1] = __io_apic_read(apic, 0x11 + 2 * (pin)); \
- _e_; \
-})
-#define __ioapic_write_entry(apic, pin, raw, ent) ({ \
- ASSERT(raw); \
- __io_apic_write(apic, 0x10 + 2 * (pin), ((u32 *)&(ent))[0]); \
- __io_apic_write(apic, 0x11 + 2 * (pin), ((u32 *)&(ent))[1]); \
-})
-#else
+#if defined(__i386__) || defined(__x86_64__)
#include <asm/apic.h>
#include <asm/io_apic.h>
#define nr_ioapic_entries(i) nr_ioapic_entries[i]
@@ -326,8 +307,6 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu,
new_ire.lo.dst = value;
else
new_ire.lo.dst = (value >> 24) << 8;
-#else /* __ia64__ */
- new_ire.lo.dst = value >> 16;
#endif
}
else
@@ -625,12 +604,8 @@ static int msi_msg_to_remap_entry(
new_ire.lo.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
new_ire.lo.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
new_ire.lo.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
-#ifdef CONFIG_X86
/* Hardware require RH = 1 for LPR delivery mode */
new_ire.lo.rh = (new_ire.lo.dlm == dest_LowestPrio);
-#else
- new_ire.lo.rh = 0;
-#endif
new_ire.lo.avail = 0;
new_ire.lo.res_1 = 0;
new_ire.lo.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
@@ -703,18 +678,6 @@ void msi_msg_write_remap_rte(
msi_msg_to_remap_entry(iommu, pdev, msi_desc, msg);
}
-#elif defined(__ia64__)
-void msi_msg_read_remap_rte(
- struct msi_desc *msi_desc, struct msi_msg *msg)
-{
- /* TODO. */
-}
-
-void msi_msg_write_remap_rte(
- struct msi_desc *msi_desc, struct msi_msg *msg)
-{
- /* TODO. */
-}
#endif
int enable_intremap(struct iommu *iommu, int eim)
@@ -838,8 +801,6 @@ out:
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-#ifndef __ia64__
-
/*
* This function is used to enable Interrupt remapping when
* enable x2apic
@@ -914,5 +875,3 @@ void iommu_disable_x2apic_IR(void)
for_each_drhd_unit ( drhd )
disable_qinval(drhd->iommu);
}
-
-#endif /* !__ia64__ */
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 1add32ef1d..14c05817d5 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -33,7 +33,7 @@
#include <xen/keyhandler.h>
#include <asm/msi.h>
#include <asm/irq.h>
-#ifndef __ia64__
+#if defined(__i386__) || defined(__x86_64__)
#include <asm/hvm/vmx/vmx.h>
#include <asm/p2m.h>
#include <mach_apic.h>
@@ -44,10 +44,6 @@
#include "vtd.h"
#include "../ats.h"
-#ifdef __ia64__
-#define nr_ioapics iosapic_get_nr_iosapics()
-#endif
-
/* Possible unfiltered LAPIC/MSI messages from untrusted sources? */
bool_t __read_mostly untrusted_msi;
@@ -1057,11 +1053,7 @@ static unsigned int dma_msi_startup(struct irq_desc *desc)
return 0;
}
-#ifndef __ia64__
static void dma_msi_end(struct irq_desc *desc, u8 vector)
-#else
-static void dma_msi_end(struct irq_desc *desc)
-#endif
{
dma_msi_unmask(desc);
ack_APIC_irq();
@@ -1841,7 +1833,6 @@ void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
static int vtd_ept_page_compatible(struct iommu *iommu)
{
-#ifndef __ia64__
u64 ept_cap, vtd_cap = iommu->cap;
/* EPT is not initialised yet, so we must check the capability in
@@ -1851,9 +1842,6 @@ static int vtd_ept_page_compatible(struct iommu *iommu)
return ( ept_has_2mb(ept_cap) == cap_sps_2mb(vtd_cap)
&& ept_has_1gb(ept_cap) == cap_sps_1gb(vtd_cap) );
-#else
- return 0;
-#endif
}
/*
@@ -1861,7 +1849,6 @@ static int vtd_ept_page_compatible(struct iommu *iommu)
*/
void iommu_set_pgd(struct domain *d)
{
-#ifndef __ia64__
struct hvm_iommu *hd = domain_hvm_iommu(d);
mfn_t pgd_mfn;
@@ -1872,7 +1859,6 @@ void iommu_set_pgd(struct domain *d)
pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
-#endif
}
static int rmrr_identity_mapping(struct domain *d,
diff --git a/xen/drivers/passthrough/vtd/utils.c b/xen/drivers/passthrough/vtd/utils.c
index 40e2b597b6..07337b9e48 100644
--- a/xen/drivers/passthrough/vtd/utils.c
+++ b/xen/drivers/passthrough/vtd/utils.c
@@ -301,8 +301,7 @@ static void dump_iommu_info(unsigned char key)
}
}
#else
- printk("%s: not implemnted on IA64 for now.\n", __func__);
- /* ia64: TODO */
+ printk("%s: not implemented for now\n", __func__);
#endif
}
diff --git a/xen/drivers/passthrough/vtd/vtd.h b/xen/drivers/passthrough/vtd/vtd.h
index 499c3d3eb1..02e9d7854c 100644
--- a/xen/drivers/passthrough/vtd/vtd.h
+++ b/xen/drivers/passthrough/vtd/vtd.h
@@ -26,44 +26,8 @@
#define MAP_ME_PHANTOM_FUNC 1
#define UNMAP_ME_PHANTOM_FUNC 0
-/* Accomodate both IOAPIC and IOSAPIC. */
-#ifndef __ia64__
+/* Allow for both IOAPIC and IOSAPIC. */
#define IO_xAPIC_route_entry IO_APIC_route_entry
-#else
-struct IO_xAPIC_route_entry {
- __u32 vector : 8,
- delivery_mode : 3, /* 000: FIXED
- * 001: lowest prio
- * 111: ExtINT
- */
- dest_mode : 1, /* 0: physical, 1: logical */
- delivery_status : 1,
- polarity : 1,
- irr : 1,
- trigger : 1, /* 0: edge, 1: level */
- mask : 1, /* 0: enabled, 1: disabled */
- __reserved_2 : 15;
-
- union {
- struct { __u32
- __reserved_1 : 24,
- physical_dest : 4,
- __reserved_2 : 4;
- } physical;
-
- struct { __u32
- __reserved_1 : 24,
- logical_dest : 8;
- } logical;
-
- struct { __u32
- __reserved_1 : 16,
- dest_id : 16;
- };
- } dest;
-
-} __attribute__ ((packed));
-#endif
struct IO_APIC_route_remap_entry {
union {
diff --git a/xen/include/Makefile b/xen/include/Makefile
index 004c9ccef5..62846a156d 100644
--- a/xen/include/Makefile
+++ b/xen/include/Makefile
@@ -38,7 +38,7 @@ suffix-$(CONFIG_X86) := \#pragma pack()
endif
public-$(CONFIG_X86) := $(wildcard public/arch-x86/*.h public/arch-x86/*/*.h)
-public-$(CONFIG_IA64) := $(wildcard public/arch-ia64/*.h public/arch-ia64/*/*.h)
+public-$(CONFIG_ARM) := $(wildcard public/arch-arm/*.h public/arch-arm/*/*.h)
.PHONY: all
all: $(headers-y)
@@ -74,8 +74,6 @@ compat/xlat.h: xlat.lst $(filter-out compat/xlat.h,$(headers-y)) $(BASEDIR)/tool
mv -f $@.new $@
ifeq ($(XEN_TARGET_ARCH),$(XEN_COMPILE_ARCH))
-# public/arch-ia64.h explicitly bails on __STRICT_ANSI__
-ifeq ($(CONFIG_IA64),)
all: headers.chk
@@ -84,7 +82,6 @@ headers.chk: $(filter-out public/arch-% public/%ctl.h public/xsm/% public/%hvm/s
mv $@.new $@
endif
-endif
clean::
rm -rf compat headers.chk
diff --git a/xen/include/asm-ia64/bug.h b/xen/include/asm-ia64/bug.h
deleted file mode 100644
index b96dd8e3bf..0000000000
--- a/xen/include/asm-ia64/bug.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __IA64_BUG_H__
-#define __IA64_BUG_H__
-
-#define BUG() __bug(__FILE__, __LINE__)
-#define WARN() __warn(__FILE__, __LINE__)
-
-#define dump_execution_state() \
- do { \
- printk("FIXME: implement ia64 dump_execution_state()\n"); \
- dump_stack(); \
- } while (0)
-
-#define vcpu_show_execution_state(v) printk("FIXME: implement ia64 vcpu_show_execution_state()\n")
-
-#endif /* __IA64_BUG_H__ */
diff --git a/xen/include/asm-ia64/bundle.h b/xen/include/asm-ia64/bundle.h
deleted file mode 100644
index 8d8d1c9630..0000000000
--- a/xen/include/asm-ia64/bundle.h
+++ /dev/null
@@ -1,239 +0,0 @@
-#ifndef _XEN_IA64_BUNDLE_H
-#define _XEN_IA64_BUNDLE_H
-
-typedef unsigned long IA64_INST;
-
-typedef union U_IA64_BUNDLE {
- unsigned long i64[2];
- struct { unsigned long template:5,slot0:41,slot1a:18,slot1b:23,slot2:41; };
- // NOTE: following doesn't work because bitfields can't cross natural
- // size boundaries
- //struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; };
-} IA64_BUNDLE;
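-
-/* Illustrative helper (not part of the original header): reassemble the
-   41-bit slot1 from the slot1a/slot1b halves split by the 64-bit
-   boundary above. */
-static inline unsigned long ia64_bundle_slot1(IA64_BUNDLE b)
-{
-	return (unsigned long)b.slot1a | ((unsigned long)b.slot1b << 18);
-}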
-
-typedef enum E_IA64_SLOT_TYPE { I, M, F, B, L, ILLEGAL } IA64_SLOT_TYPE;
-
-typedef union U_INST64_A5 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5, imm9d:9, s:1, major:4; };
-} INST64_A5;
-
-typedef union U_INST64_B4 {
- IA64_INST inst;
- struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6, wh:2, d:1, un1:1, major:4; };
-} INST64_B4;
-
-typedef union U_INST64_B8 {
- IA64_INST inst;
- struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
-} INST64_B8;
-
-typedef union U_INST64_B9 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
-} INST64_B9;
-
-typedef union U_INST64_I18 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm20:20, y:1, x6:6, x3:3, i:1, major:4; };
-} INST64_I18;
-
-typedef union U_INST64_I19 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
-} INST64_I19;
-
-typedef union U_INST64_I26 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4;};
-} INST64_I26;
-
-typedef union U_INST64_I27 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4;};
-} INST64_I27;
-
-typedef union U_INST64_I28 { // not privileged (mov from AR)
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4;};
-} INST64_I28;
-
-typedef union U_INST64_M28 {
- IA64_INST inst;
- struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, x:1, major:4;};
-} INST64_M28;
-
-typedef union U_INST64_M29 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4;};
-} INST64_M29;
-
-typedef union U_INST64_M30 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, imm:7, ar3:7,x4:4,x2:2,x3:3,s:1,major:4;};
-} INST64_M30;
-
-typedef union U_INST64_M31 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4;};
-} INST64_M31;
-
-typedef union U_INST64_M32 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4;};
-} INST64_M32;
-
-typedef union U_INST64_M33 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M33;
-
-typedef union U_INST64_M35 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-
-} INST64_M35;
-
-typedef union U_INST64_M36 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
-} INST64_M36;
-
-typedef union U_INST64_M37 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm20a:20,:1, x4:4,x2:2,x3:3, i:1, major:4; };
-} INST64_M37;
-
-typedef union U_INST64_M41 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-} INST64_M41;
-
-typedef union U_INST64_M42 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M42;
-
-typedef union U_INST64_M43 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M43;
-
-typedef union U_INST64_M44 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
-} INST64_M44;
-
-typedef union U_INST64_M45 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M45;
-
-typedef union U_INST64_M46 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6, x3:3, un1:1, major:4; };
-} INST64_M46;
-
-typedef union U_INST64_M47 {
- IA64_INST inst;
- struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
-} INST64_M47;
-
-typedef union U_INST64_M1{
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
-} INST64_M1;
-
-typedef union U_INST64_M2{
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
-} INST64_M2;
-
-typedef union U_INST64_M3{
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2, x6:6, s:1, major:4; };
-} INST64_M3;
-
-typedef union U_INST64_M4 {
- IA64_INST inst;
- struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
-} INST64_M4;
-
-typedef union U_INST64_M5 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2, x6:6, s:1, major:4; };
-} INST64_M5;
-
-typedef union U_INST64_M6 {
- IA64_INST inst;
- struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
-} INST64_M6;
-
-typedef union U_INST64_M9 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
-} INST64_M9;
-
-typedef union U_INST64_M10 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2, x6:6, s:1, major:4; };
-} INST64_M10;
-
-typedef union U_INST64_M12 {
- IA64_INST inst;
- struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
-} INST64_M12;
-
-typedef union U_INST64_M15 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2, x6:6, s:1, major:4; };
-} INST64_M15;
-
-typedef union U_INST64 {
- IA64_INST inst;
- struct { unsigned long :37, major:4; } generic;
- INST64_A5 A5; // used in build_hypercall_bundle only
- INST64_B4 B4; // used in build_hypercall_bundle only
- INST64_B8 B8; // rfi, bsw.[01]
- INST64_B9 B9; // break.b
- INST64_I18 I18; // nop.i used in build_fpswa_hypercall_bundle only
- INST64_I19 I19; // used in build_hypercall_bundle only
- INST64_I26 I26; // mov register to ar (I unit)
- INST64_I27 I27; // mov immediate to ar (I unit)
- INST64_I28 I28; // mov from ar (I unit)
- INST64_M1 M1; // ld integer
- INST64_M2 M2;
- INST64_M3 M3;
- INST64_M4 M4; // st integer
- INST64_M5 M5;
-    INST64_M6 M6;	// ldfd floating-point load
-    INST64_M9 M9;	// stfd floating-point store
-    INST64_M10 M10;	// stfd floating-point store
-    INST64_M12 M12;	// ldfd pair floating-point load pair
- INST64_M15 M15; // lfetch + imm update
- INST64_M28 M28; // purge translation cache entry
- INST64_M29 M29; // mov register to ar (M unit)
- INST64_M30 M30; // mov immediate to ar (M unit)
- INST64_M31 M31; // mov from ar (M unit)
- INST64_M32 M32; // mov reg to cr
- INST64_M33 M33; // mov from cr
- INST64_M35 M35; // mov to psr
- INST64_M36 M36; // mov from psr
- INST64_M37 M37; // break.m
- INST64_M41 M41; // translation cache insert
- INST64_M42 M42; // mov to indirect reg/translation reg insert
- INST64_M43 M43; // mov from indirect reg
- INST64_M44 M44; // set/reset system mask
- INST64_M45 M45; // translation purge
- INST64_M46 M46; // translation access (tpa,tak)
- INST64_M47 M47; // purge translation entry
-} INST64;
-
-#ifdef __XEN__
-extern unsigned long __vmx_get_domain_bundle(unsigned long iip, IA64_BUNDLE *pbundle);
-extern IA64_BUNDLE __get_domain_bundle(unsigned long iip);
-#endif
-
-#define MASK_41 ((unsigned long)0x1ffffffffff)
-
-#endif /* _XEN_IA64_BUNDLE_H */
diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h
deleted file mode 100644
index 9ca237277e..0000000000
--- a/xen/include/asm-ia64/config.h
+++ /dev/null
@@ -1,290 +0,0 @@
-#ifndef _IA64_CONFIG_H_
-#define _IA64_CONFIG_H_
-
-#undef DEBUG_PFMON
-
-// manufactured from component pieces
-
-// defined in linux/arch/ia64/defconfig
-#define CONFIG_IA64_GENERIC
-#define CONFIG_HZ 32
-
-#define CONFIG_IA64_L1_CACHE_SHIFT 7
-// needed by include/asm-ia64/page.h
-#define CONFIG_IA64_PAGE_SIZE_16KB // 4KB doesn't work?!?
-#define CONFIG_IA64_GRANULE_16MB
-
-// this needs to be on to run on system with large memory hole
-#define CONFIG_VIRTUAL_FRAME_TABLE
-
-#define CONFIG_EFI
-#define CONFIG_EFI_PCDP
-#define CONFIG_SERIAL_SGI_L1_CONSOLE
-#define CONFIG_KEXEC 1
-#define CONFIG_XENOPROF 1
-
-#define CONFIG_XEN_SMP
-
-#ifdef CONFIG_XEN_SMP
-#define CONFIG_SMP 1
-#define CONFIG_HOTPLUG_CPU 1
-#ifdef MAX_PHYS_CPUS
-#define NR_CPUS MAX_PHYS_CPUS
-#else
-#define NR_CPUS 64
-#endif
-#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS
-#define MAX_HVM_VCPUS MAX_VIRT_CPUS
-#define CONFIG_NUMA
-#define CONFIG_ACPI_NUMA
-#define NODES_SHIFT 8 /* linux/asm/numnodes.h */
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) /* linux-xen/asm/acpi.h */
-#else
-#undef CONFIG_SMP
-#define NR_CPUS 1
-#endif
-#define CONFIG_NR_CPUS NR_CPUS
-
-#define CONFIG_IOSAPIC
-#define supervisor_mode_kernel (0)
-
-#define PADDR_BITS 48
-
-/* If PERFC is used, include privop maps. */
-#ifdef PERF_COUNTERS
-#define CONFIG_PRIVOP_ADDRS
-#define PRIVOP_COUNT_NADDRS 30
-#endif
-
-#define CONFIG_VGA 1
-
-
-#define NR_hypercalls 64
-
-/* PV domains use this value for priv. level 0 emulation */
-#define CONFIG_CPL0_EMUL 1
-
-#ifndef __ASSEMBLY__
-
-// can't find where this typedef was before?!?
-// needed by include/asm-ia64/processor.h (and other places)
-typedef int pid_t;
-
-// now needed for xen/include/mm.h
-typedef unsigned long paddr_t;
-#define INVALID_PADDR (~0UL)
-#define PRIpaddr "016lx"
-// from include/linux/kernel.h
-#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
-
-//////////////////////////////////////
-
-#define FASTCALL(x) x // see linux/include/linux/linkage.h
-#define fastcall // " "
-
-#define watchdog_disable() ((void)0)
-#define watchdog_enable() ((void)0)
-// from linux/include/linux/types.h
-#define CLEAR_BITMAP(name,bits) \
- memset(name, 0, BITS_TO_LONGS(bits)*sizeof(unsigned long))
-
-extern unsigned long total_pages;
-extern unsigned long xen_pstart;
-extern unsigned long xenheap_size;
-
-extern int running_on_sim;
-
-// from linux/include/linux/mm.h
-extern struct page_info *mem_map;
-
-// xen/include/asm/config.h
-extern char _end[]; /* standard ELF symbol */
-
-// linux/include/linux/compiler.h
-//#define __kernel
-//#define __safe
-#define __force
-#define __chk_user_ptr(x) (void)0
-//#define __chk_io_ptr(x) (void)0
-//#define __builtin_warning(x, y...) (1)
-//#define __acquires(x)
-//#define __releases(x)
-//#define __acquire(x) (void)0
-//#define __release(x) (void)0
-//#define __cond_lock(x) (x)
-//#define __must_check
-#define __deprecated
-
-// xen/include/asm/config.h
-//#define HZ 1000
-// FIXME SMP: leave SMP for a later time
-///////////////////////////////////////////////////////////////
-// xen/include/asm/config.h
-#define ELFSIZE 64
-
-///////////////////////////////////////////////////////////////
-
-// get rid of difficult circular include dependency
-#define CMPXCHG_BUGCHECK(v)
-#define CMPXCHG_BUGCHECK_DECL
-
-// from include/asm-ia64/smp.h
-#define get_cpu() smp_processor_id()
-#define put_cpu() do {} while(0)
-#define put_cpu_no_resched() do{} while (0)
-
-// needed for common/dom0_ops.c until hyperthreading is supported
-#ifdef CONFIG_SMP
-extern int smp_num_siblings;
-#else
-#define smp_num_siblings 1
-#endif
-
-// function calls; see decl in xen/include/xen/sched.h
-#undef free_task_struct
-#undef alloc_task_struct
-#define get_thread_info(v) alloc_thread_info(v)
-
-// avoid redefining task_t in asm/thread_info.h
-#define task_t struct domain
-
-// avoid redefining task_struct in asm/current.h
-#define task_struct vcpu
-
-#include <xen/cache.h>
-#ifndef CONFIG_SMP
-#define __cacheline_aligned_in_smp
-#else
-#define __cacheline_aligned_in_smp __cacheline_aligned
-#endif
-
-#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
-#ifndef CONFIG_SMP
-#define ____cacheline_aligned_in_smp
-#else
-#define ____cacheline_aligned_in_smp ____cacheline_aligned
-#endif
-
-#define CONFIG_PERFMON
-
-#ifndef __ASSEMBLY__
-#include "asm/types.h" // for u64
-#include "linux/linkage.h" // for asmlinkage which is used by
- // xen/include/acpi/acpixf.h
-#endif
-
-// warning: unless search_extable is declared, the return value gets
-// truncated to 32 bits, causing a very strange error in privop handling
-struct exception_table_entry;
-
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *first,
- const struct exception_table_entry *last,
- unsigned long value);
-void sort_extable(struct exception_table_entry *start,
- struct exception_table_entry *finish);
-void sort_main_extable(void);
-
-#define find_first_set_bit(x) (ffs(x)-1) // FIXME: Is this right???
-
-// see drivers/char/console.c
-#define OPT_CONSOLE_STR "com1"
-
-#define __nocast
-
-// see include/asm-x86/atomic.h (different from standard linux)
-#define _atomic_set(v,i) (((v).counter) = (i))
-#define _atomic_read(v) ((v).counter)
-#define atomic_compareandswap(old, new, v) ((atomic_t){ cmpxchg(v, _atomic_read(old), _atomic_read(new)) })
-
-// Deprecated Linux interfaces, kept here for short-term compatibility
-#define kmalloc(s, t) xmalloc_bytes((s))
-#define kfree(s) xfree((s))
-#define kzalloc(size, flags) \
-({ \
- unsigned char *mem; \
- mem = (unsigned char *)xmalloc_bytes(size); \
- if (mem) \
- memset(mem, 0, size); \
- (void *)mem; \
-})
-#define kcalloc(n, size, flags)		kzalloc((n) * (size), flags)
-#define alloc_bootmem_node(pg, size) xmalloc_bytes(size)
-
-// see common/keyhandler.c
-#define nop() asm volatile ("nop 0")
-
-// needed for include/xen/linuxtime.h
-typedef s64 time_t;
-typedef s64 suseconds_t;
-
-// needed for include/linux/jiffies.h
-typedef long clock_t;
-
-// from include/linux/kernel.h, needed by jiffies.h
-#define typecheck(type,x) \
-({ type __dummy; \
- typeof(x) __dummy2; \
- (void)(&__dummy == &__dummy2); \
- 1; \
-})
-
-// from include/linux/timex.h, needed by arch/ia64/time.c
-#define TIME_SOURCE_CPU 0
-
-// used in common code
-#define softirq_pending(cpu) (cpu_data(cpu)->softirq_pending)
-
-// dup'ed from signal.h to avoid changes to includes
-#define SA_SHIRQ 0x04000000
-#define SA_INTERRUPT 0x20000000
-
-// needed for setup.c
-extern unsigned long loops_per_jiffy;
-extern char saved_command_line[];
-struct screen_info { };
-#define seq_printf(a,b...) printk(b)
-//#define CONFIG_BLK_DEV_INITRD // needed to reserve memory for domain0
-
-#define CONFIG_SHADOW 1
-
-// xen/include/asm/config.h
-/******************************************************************************
- * config.h
- *
- * A Linux-style configuration list.
- */
-
-#ifndef __XEN_IA64_CONFIG_H__
-#define __XEN_IA64_CONFIG_H__
-
-#undef CONFIG_X86
-
-#define CONFIG_MCKINLEY
-
-#undef CONFIG_X86_LOCAL_APIC
-#undef CONFIG_X86_IO_APIC
-#undef CONFIG_X86_L1_CACHE_SHIFT
-
-//#ifndef CONFIG_IA64_HP_SIM
-// looks like this is hard to turn off for Xen
-#define CONFIG_ACPI 1
-#define CONFIG_ACPI_BOOT 1
-//#endif
-
-#define CONFIG_XEN_ATTENTION_KEY 1
-#endif /* __ASSEMBLY__ */
-#endif /* __XEN_IA64_CONFIG_H__ */
-
-/* Allow .serialize.data/instruction in asm files.
- Old as doesn't handle this. */
-#define HAVE_SERIALIZE_DIRECTIVE
-
-/* Define CONFIG_PRIVIFY to support privified OS (deprecated). */
-#undef CONFIG_PRIVIFY
-
-#define CONFIG_XENCOMM_MARK_DIRTY 1
-
-#define ARCH_CRASH_SAVE_VMCOREINFO
-
-#endif /* _IA64_CONFIG_H_ */
diff --git a/xen/include/asm-ia64/debugger.h b/xen/include/asm-ia64/debugger.h
deleted file mode 100644
index 9c55744436..0000000000
--- a/xen/include/asm-ia64/debugger.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/******************************************************************************
- * asm/debugger.h
- *
- * Generic hooks into arch-dependent Xen.
- *
- * Each debugger should define two functions here:
- *
- * 1. debugger_trap_entry():
- * Called at start of any synchronous fault or trap, before any other work
- * is done. The idea is that if your debugger deliberately caused the trap
- * (e.g. to implement breakpoints or data watchpoints) then you can take
- * appropriate action and return a non-zero value to cause early exit from
- * the trap function.
- *
- * 2. debugger_trap_fatal():
- * Called when Xen is about to give up and crash. Typically you will use this
- * hook to drop into a debug session. It can also be used to hook off
- * deliberately caused traps (which you then handle and return non-zero)
- * but really these should be hooked off 'debugger_trap_entry'.
- */
-
-#ifndef __ASM_DEBUGGER_H__
-#define __ASM_DEBUGGER_H__
-
-// this number is an arbitrary number which is not used for any other purpose
-// __builtin_trap() 0x0
-// ski 0x80001, 0x80002
-// kdb 0x80100, 0x80101
-// kprobe 0x80200, jprobe 0x80300
-// kgdb 0x6665
-// gdb 0x99998 (#define IA64_BREAKPOINT 0x00003333300LL)
-// ltrace 0x80001 (NOTE: this conflicts with ski)
-
-// cdb should handle 0 and CDB_BREAK_NUM.
-#define CDB_BREAK_NUM 0x80800
-
-
-#ifndef __ASSEMBLY__
-
-#include <xen/sched.h>
-#include <xen/softirq.h>
-#include <xen/gdbstub.h>
-#include <public/arch-ia64/debug_op.h>
-
-void show_registers(struct cpu_user_regs *regs);
-void dump_stack(void);
-
-static inline void
-show_execution_state(struct cpu_user_regs *regs)
-{
- show_registers(regs);
-}
-
-// NOTE: on xen struct pt_regs = struct cpu_user_regs
-// see include/asm-ia64/linux-xen/asm/ptrace.h
-#ifdef CRASH_DEBUG
-// crash_debug=y
-
-extern int __trap_to_cdb(struct cpu_user_regs *r);
-static inline int debugger_trap_fatal(
- unsigned int vector, struct cpu_user_regs *regs)
-{
- (void)__trap_to_gdb(regs, vector);
- return 0;
-}
-
-#define ____debugger_trap_immediate(b) __asm__ __volatile__ ("break.m "#b"\n")
-#define __debugger_trap_immediate(b) ____debugger_trap_immediate(b)
-#define debugger_trap_immediate() __debugger_trap_immediate(CDB_BREAK_NUM)
-
-//XXX temporary workaround
-#ifndef CONFIG_SMP
-#define smp_send_stop() /* nothing */
-#endif
-
-#else
-static inline int debugger_trap_fatal(
- unsigned int vector, struct cpu_user_regs *regs)
-{
- return 0;
-}
-
-#define debugger_trap_immediate() ((void)0)
-#endif
-
-static inline int debugger_event(unsigned long event)
-{
- struct vcpu *v = current;
- struct domain *d = v->domain;
-
- if (unlikely (d->debugger_attached && (d->arch.debug_flags & event))) {
- d->arch.debug_event = event;
- domain_pause_for_debugger();
- return 1;
- }
- return 0;
-}
-
-static inline int debugger_kernel_event(
- struct cpu_user_regs *regs, unsigned long event)
-{
- struct vcpu *v = current;
- struct domain *d = v->domain;
-
- if (unlikely(d->debugger_attached && (d->arch.debug_flags & event)
- && guest_kernel_mode(regs))) {
- d->arch.debug_event = event;
- domain_pause_for_debugger();
- return 1;
- }
- return 0;
-}
-
-#endif // __ASSEMBLY__
-
-#endif /* __ASM_DEBUGGER_H__ */
diff --git a/xen/include/asm-ia64/dom_fw.h b/xen/include/asm-ia64/dom_fw.h
deleted file mode 100644
index 444804feb7..0000000000
--- a/xen/include/asm-ia64/dom_fw.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Xen domain firmware emulation
- *
- * Copyright (C) 2004 Hewlett-Packard Co
- * Dan Magenheimer (dan.magenheimer@hp.com)
- */
-
-#define __IA64_XEN_HYPERCALL_DEFAULT 0x1000
-#define __IA64_XEN_HYPERCALL_DEFAULT_STR "0x1000"
-
-/* Portion of guest physical memory space reserved for PAL/SAL/EFI/ACPI
- data and code. */
-#define FW_BASE_PADDR 0x0000UL
-/* It is assumed that FW_END_PADDR_MIN = FW_TABLES_END_PADDR_MIN */
-#define FW_END_PADDR_MIN 0x3000UL
-
-/* This is used to determine the portion of a domain's metaphysical memory
- space reserved for the hypercall patch table. */
-/* Map:
- Index Addr
- 0x0000-0x000f 0x0000-0x00ff : unused
- 0x0010-0x001f 0x0100-0x01ff : EFI
- 0x0080-0x008f 0x0800-0x08ff : PAL/SAL
- 0x0090-0x009f 0x0900-0x09ff : FPSWA
-*/
-#define FW_HYPERCALL_BASE_PADDR 0x0000UL
-#define FW_HYPERCALL_END_PADDR		0x1000UL
-#define FW_HYPERCALL_PADDR(index)	(FW_HYPERCALL_BASE_PADDR + (16UL * (index)))
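-
-/* Worked example (illustrative): the PAL-call pseudo-entry-point at
-   index 0x80 sits at FW_HYPERCALL_PADDR(0x80) = 0x0000 + 16 * 0x80 =
-   0x800, matching the 0x0800-0x08ff PAL/SAL row in the map above. */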
-
-/* Base and end guest physical address of ACPI tables. */
-#define FW_ACPI_BASE_PADDR 0x1000UL
-#define FW_ACPI_END_PADDR 0x2000UL
-
-/* Base and end guest physical address of EFI and SAL (non-ACPI) tables. */
-#define FW_TABLES_BASE_PADDR 0x2000UL
-#define FW_TABLES_END_PADDR_MIN 0x3000UL
-
-
-/* Hypercall numbers have a low part and a high part.
- The high part is the class (xen/pal/sal/efi). */
-#define FW_HYPERCALL_NUM_MASK_HIGH ~0xffUL
-#define FW_HYPERCALL_NUM_MASK_LOW 0xffUL
-
-/* Xen hypercalls are 0-63. */
-#define FW_HYPERCALL_XEN 0x0000UL
-
-/* Define some faster and lighter hypercalls.
- See definitions in arch-ia64.h */
-#define FW_HYPERCALL_XEN_FAST 0x0200UL
-
-/*
- * PAL can be called in physical or virtual mode simply by
- * branching to pal_entry_point, which is found in one of the
- * SAL system table entrypoint descriptors (type=0). Parameters
- * may be passed in r28-r31 (static) or r32-r35 (stacked); which
- * convention is used depends on which procedure is being called.
- * r28 contains the PAL index, the indicator of which PAL procedure
- * is to be called: Index=0 is reserved, 1-255 indicates static
- * parameters, 256-511 indicates stacked parameters. 512-1023
- * are implementation-specific and 1024+ are reserved.
- * rp=b0 indicates the return point.
- *
- * A single hypercall is used for all PAL calls.
- * The hypercall stub is xen_ia64_pal_call_stub (dom_fw_asm.S).
- * Its size is 2 bundles.
- */
-
-#define FW_HYPERCALL_PAL_CALL_INDEX 0x80UL
-#define FW_HYPERCALL_PAL_CALL_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_PAL_CALL_INDEX)
-#define FW_HYPERCALL_PAL_CALL 0x1000UL
-#define FW_HYPERCALL_PAL_CALL_ASM 0x1000
-
-/*
- * SAL consists of a table of descriptors, one of which (type=0)
- * contains a sal_entry_point which provides access to a number of
- * functions. Parameters are passed in r33-r39; r32 contains the
- * index of the SAL function being called. At entry, r1=gp contains
- * a global pointer which may be needed by the function. rp=b0
- * indicates the return point. SAL may not be re-entrant; an
- * OS must ensure it is called by one processor at a time.
- *
- * A single hypercall is used for all SAL calls.
- */
-
-#define FW_HYPERCALL_SAL_CALL_INDEX 0x82UL
-#define FW_HYPERCALL_SAL_CALL_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_SAL_CALL_INDEX)
-#define FW_HYPERCALL_SAL_CALL 0x1100UL
-
-/* SAL return point. */
-#define FW_HYPERCALL_SAL_RETURN_INDEX 0x84UL
-#define FW_HYPERCALL_SAL_RETURN_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_SAL_RETURN_INDEX)
-#define FW_HYPERCALL_SAL_RETURN 0x1200UL
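-
-/* Illustrative use of the class masks defined earlier: for
-   FW_HYPERCALL_SAL_CALL (0x1100UL), num & FW_HYPERCALL_NUM_MASK_HIGH
-   yields the class 0x1100 and num & FW_HYPERCALL_NUM_MASK_LOW yields 0,
-   so a dispatcher can switch on the high part alone. */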
-
-/*
- * EFI is accessed via the EFI system table, which contains:
- * - a header which contains version info
- * - console information (stdin,stdout,stderr)
- * as well as pointers to:
- * - the EFI configuration table, which contains GUID/pointer pairs,
- * one of which is a pointer to the SAL system table; another is
- * a pointer to the ACPI table
- * - the runtime services table, which contains a header followed by
- * a list of (11) unique "runtime" entry points. EFI runtime entry
- * points are real function descriptors so contain both a (physical)
- * address and a global pointer. They are entered (at first) in
- * physical mode, though it is possible (optionally... requests can
- * be ignored and calls still must be OK) to call one entry point
- * which switches the others so they are capable of being called in
- * virtual mode. Parameters are passed in stacked registers, and
- * rp=b0 indicates the return point.
- * - the boot services table, which contains bootloader-related
- * entry points (ADD MORE HERE LATER)
- *
- * Each runtime (and boot) entry point requires a unique hypercall.
- */
-
-/* these are indexes into the runtime services table */
-#define FW_HYPERCALL_EFI_GET_TIME_INDEX 0x10UL
-#define FW_HYPERCALL_EFI_SET_TIME_INDEX 0x11UL
-#define FW_HYPERCALL_EFI_GET_WAKEUP_TIME_INDEX 0x12UL
-#define FW_HYPERCALL_EFI_SET_WAKEUP_TIME_INDEX 0x13UL
-#define FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP_INDEX 0x14UL
-#define FW_HYPERCALL_EFI_GET_VARIABLE_INDEX 0x15UL
-#define FW_HYPERCALL_EFI_GET_NEXT_VARIABLE_INDEX 0x16UL
-#define FW_HYPERCALL_EFI_SET_VARIABLE_INDEX 0x17UL
-#define FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT_INDEX 0x18UL
-#define FW_HYPERCALL_EFI_RESET_SYSTEM_INDEX 0x19UL
-
-/* these are hypercall numbers */
-#define FW_HYPERCALL_EFI_CALL 0x300UL
-#define FW_HYPERCALL_EFI_GET_TIME 0x300UL
-#define FW_HYPERCALL_EFI_SET_TIME 0x301UL
-#define FW_HYPERCALL_EFI_GET_WAKEUP_TIME 0x302UL
-#define FW_HYPERCALL_EFI_SET_WAKEUP_TIME 0x303UL
-#define FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP 0x304UL
-#define FW_HYPERCALL_EFI_GET_VARIABLE 0x305UL
-#define FW_HYPERCALL_EFI_GET_NEXT_VARIABLE 0x306UL
-#define FW_HYPERCALL_EFI_SET_VARIABLE 0x307UL
-#define FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT 0x308UL
-#define FW_HYPERCALL_EFI_RESET_SYSTEM 0x309UL
-
-/* these are the physical addresses of the pseudo-entry points that
- * contain the hypercalls */
-#define FW_HYPERCALL_EFI_GET_TIME_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_GET_TIME_INDEX)
-#define FW_HYPERCALL_EFI_SET_TIME_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_SET_TIME_INDEX)
-#define FW_HYPERCALL_EFI_GET_WAKEUP_TIME_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_GET_WAKEUP_TIME_INDEX)
-#define FW_HYPERCALL_EFI_SET_WAKEUP_TIME_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_SET_WAKEUP_TIME_INDEX)
-#define FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP_INDEX)
-#define FW_HYPERCALL_EFI_GET_VARIABLE_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_GET_VARIABLE_INDEX)
-#define FW_HYPERCALL_EFI_GET_NEXT_VARIABLE_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_GET_NEXT_VARIABLE_INDEX)
-#define FW_HYPERCALL_EFI_SET_VARIABLE_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_SET_VARIABLE_INDEX)
-#define FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT_INDEX)
-#define FW_HYPERCALL_EFI_RESET_SYSTEM_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_RESET_SYSTEM_INDEX)
-
-/*
- * This is a hypercall number for IPI.
- * No pseudo-entry-point is provided for the IPI hypercall.  This hypercall
- * number is used in xen_send_ipi of linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S.
- */
-#define FW_HYPERCALL_IPI 0x400UL
-
-/*
- * This is a hypercall number for FPSWA.
- * FPSWA hypercall uses one bundle for a pseudo-entry-point
- * and 14 bundles for a hypercall-patch.
- *
- * 0x500 was used before, but that implementation was broken.  To keep
- * the hypercall ABI stable, 0x500 is obsolete and 0x501 is allocated
- * for the FPSWA hypercall.
- */
-#define FW_HYPERCALL_FPSWA_ENTRY_INDEX 0x90UL
-#define FW_HYPERCALL_FPSWA_PATCH_INDEX 0x91UL
-#define FW_HYPERCALL_FPSWA_ENTRY_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_FPSWA_ENTRY_INDEX)
-#define FW_HYPERCALL_FPSWA_PATCH_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_FPSWA_PATCH_INDEX)
-#define FW_HYPERCALL_FPSWA_BASE 0x500UL
-#define FW_HYPERCALL_FPSWA_BROKEN 0x500UL
-#define FW_HYPERCALL_FPSWA 0x501UL
-#define FW_HYPERCALL_FPSWA_STR "0x501"
-
-/* Set the shared_info base virtual address. */
-#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600UL
-
-/* Hvmstub hypercalls.  See hvm_stub.h for details. */
-#define FW_HYPERCALL_SIOEMU 0x800UL
-
-/* Hypercall indexes below _FIRST_ARCH are reserved by Xen, while those
-   above it are for the architecture.
-   Note: this limit was defined by Xen/ia64 (not by common Xen code).
-   It can be renumbered safely.
-*/
-#define FW_HYPERCALL_FIRST_ARCH 0x300UL
-
-/* Interrupt vector used for OS boot rendezvous. */
-#define XEN_SAL_BOOT_RENDEZ_VEC 0xF3
-
-#define EFI_MEMDESC_VERSION 1
-
-/* Additional OEM SAL call. */
-#define SAL_XEN_SAL_RETURN 0x02000000
-
-#if defined(__XEN__) && !defined(__ASSEMBLY__)
-#include <linux/efi.h>
-extern struct ia64_pal_retval xen_pal_emulator(u64, u64, u64, u64);
-extern struct sal_ret_values sal_emulator (long index, unsigned long in1, unsigned long in2, unsigned long in3, unsigned long in4, unsigned long in5, unsigned long in6, unsigned long in7);
-extern struct ia64_pal_retval pal_emulator_static (unsigned long);
-extern efi_status_t efi_emulator (struct pt_regs *regs, unsigned long *fault);
-
-extern int dom_fw_setup (struct domain *, unsigned long bp_mpa, unsigned long maxmem);
-#endif
diff --git a/xen/include/asm-ia64/dom_fw_common.h b/xen/include/asm-ia64/dom_fw_common.h
deleted file mode 100644
index f0887f2aa3..0000000000
--- a/xen/include/asm-ia64/dom_fw_common.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/******************************************************************************
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#ifndef __ASM_IA64_DOM_FW_COMMON_H__
-#define __ASM_IA64_DOM_FW_COMMON_H__
-
-#ifdef __XEN__
-#include <linux/efi.h>
-#include <asm/sal.h>
-#include <xen/sched.h>
-typedef struct domain domain_t;
-#else
-#include "xc_efi.h"
-#include "ia64/sal.h"
-#include "xg_private.h"
-typedef struct xc_dom_image domain_t;
-
-#define XENLOG_INFO "info:"
-#define XENLOG_WARNING "Warning:"
-#define XENLOG_GUEST ""
-#define printk(fmt, args ...) IPRINTF(fmt, ## args)
-
-#define BUG_ON(p) assert(!(p))
-#define BUILD_BUG_ON(condition) ((void)sizeof(struct { int:-!!(condition); }))
-
-// for sort() from linux/sort.h
-#define sort(base, num, size, cmp, swap) qsort((base), (num), (size), (cmp))
-#endif
-
-#include <asm/fpswa.h>
-
-#define ONE_MB (1UL << 20)
-#define FW_VENDOR "X\0e\0n\0/\0i\0a\0\066\0\064\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
-
-#define NFUNCPTRS 16
-#define NUM_EFI_SYS_TABLES 6
-
-struct fw_tables {
- efi_system_table_t efi_systab;
- efi_runtime_services_t efi_runtime;
- efi_config_table_t efi_tables[NUM_EFI_SYS_TABLES];
-
- struct ia64_sal_systab sal_systab;
- struct ia64_sal_desc_entry_point sal_ed;
- struct ia64_sal_desc_ap_wakeup sal_wakeup;
- /* End of SAL descriptors.  Do not forget to update the checksum bound. */
-
- fpswa_interface_t fpswa_inf;
- unsigned long func_ptrs[2*NFUNCPTRS];
- struct xen_sal_data sal_data;
- unsigned char fw_vendor[sizeof(FW_VENDOR)];
-
- /*
- * These four members are for the domain builder's internal use when
- * creating the virtualized EFI memmap.  They should be zero-cleared
- * after use.
- */
- unsigned long fw_tables_size;
- unsigned long fw_end_paddr;
- unsigned long fw_tables_end_paddr;
- unsigned long num_mds;
-
- efi_memory_desc_t efi_memmap[0];
-};
-#define FW_FIELD_MPA(field) \
- FW_TABLES_BASE_PADDR + offsetof(struct fw_tables, field)
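For illustration, FW_FIELD_MPA turns a struct fw_tables member into its guest-physical ("metaphysical") address; FW_TABLES_BASE_PADDR is defined elsewhere in the tree:

    /* Sketch only: guest-physical address of the SAL system table. */
    unsigned long sal_systab_mpa = FW_FIELD_MPA(sal_systab);
    /* == FW_TABLES_BASE_PADDR + offsetof(struct fw_tables, sal_systab) */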
-
-void
-xen_ia64_efi_make_md(efi_memory_desc_t *md,
- uint32_t type, uint64_t attr,
- uint64_t start, uint64_t end);
-struct fake_acpi_tables;
-void dom_fw_fake_acpi(domain_t *d, struct fake_acpi_tables *tables);
-int efi_mdt_cmp(const void *a, const void *b);
-
-struct ia64_boot_param;
-int dom_fw_init(domain_t *d, uint64_t brkimm, struct xen_ia64_boot_param *bp,
- struct fw_tables *tables, unsigned long hypercalls_imva,
- unsigned long maxmem);
-
-// The XEN_DOMCTL_arch_setup hypercall abuses
-// struct ia64_boot_param::domain_{start, size}
-// to pass memmap_pfn and memmap_size.
-// This means the arch_setup hypercall must be issued before
-// bp->domain_{start, size} are given their real values, and the domain
-// builder must clean them up afterwards, as sketched below.
-#define XEN_IA64_MEMMAP_INFO_NUM_PAGES(bp) (bp)->domain_size
-#define XEN_IA64_MEMMAP_INFO_PFN(bp) (bp)->domain_start
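A hypothetical domain-builder sequence using these accessors (the wrapper and variable names below are assumptions, for illustration only):

    /* 1. Stash the memmap info in the boot-param fields... */
    XEN_IA64_MEMMAP_INFO_PFN(bp) = memmap_pfn;
    XEN_IA64_MEMMAP_INFO_NUM_PAGES(bp) = memmap_num_pages;
    /* 2. ...issue XEN_DOMCTL_arch_setup, which consumes them... */
    xc_domain_arch_setup(xch, domid);       /* hypothetical wrapper */
    /* 3. ...then set the real values, which the hypercall must not see. */
    bp->domain_start = dom_start;
    bp->domain_size  = dom_size;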
-
-#endif /* __ASM_IA64_DOM_FW_COMMON_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-ia64/dom_fw_dom0.h b/xen/include/asm-ia64/dom_fw_dom0.h
deleted file mode 100644
index cc52c908b8..0000000000
--- a/xen/include/asm-ia64/dom_fw_dom0.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/******************************************************************************
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#ifndef __ASM_IA64_DOM_FW_DOM0_H__
-#define __ASM_IA64_DOM_FW_DOM0_H__
-
-struct fw_tables;
-struct domain;
-
-void efi_systable_init_dom0(struct fw_tables *tables);
-int complete_dom0_memmap(struct domain *d, struct fw_tables *tables);
-void acpi_restore_tables(void);
-
-#endif /* __ASM_IA64_DOM_FW_DOM0_H__ */
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
-
diff --git a/xen/include/asm-ia64/dom_fw_domu.h b/xen/include/asm-ia64/dom_fw_domu.h
deleted file mode 100644
index 90e0f1076d..0000000000
--- a/xen/include/asm-ia64/dom_fw_domu.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#ifndef __ASM_IA64_DOM_FW_DOMU_H__
-#define __ASM_IA64_DOM_FW_DOMU_H__
-
-#include <asm/dom_fw_common.h>
-
-#ifdef __XEN__
-void efi_systable_init_domu(struct fw_tables *tables);
-#else
-void efi_systable_init_domu(xc_interface *xch, struct fw_tables *tables);
-#endif
-
-int
-complete_domu_memmap(domain_t *d,
- struct fw_tables *tables,
- unsigned long maxmem,
- unsigned long memmap_info_pfn,
- unsigned long reserved_size);
-
-#endif /* __ASM_IA64_DOM_FW_DOMU_H__ */
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
-
diff --git a/xen/include/asm-ia64/dom_fw_utils.h b/xen/include/asm-ia64/dom_fw_utils.h
deleted file mode 100644
index d54d87479c..0000000000
--- a/xen/include/asm-ia64/dom_fw_utils.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/******************************************************************************
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#ifndef __ASM_XEN_IA64_DOM_FW_UTILS_H__
-#define __ASM_XEN_IA64_DOM_FW_UTILS_H__
-
-uint32_t xen_ia64_version(struct domain *unused);
-int xen_ia64_fpswa_revision(struct domain *d, unsigned int *revision);
-int xen_ia64_is_vcpu_allocated(struct domain *d, uint32_t vcpu);
-int xen_ia64_is_running_on_sim(struct domain *unused);
-int xen_ia64_is_dom0(struct domain *d);
-void xen_ia64_set_convmem_end(struct domain *d, uint64_t convmem_end);
-void dom_fw_copy_to(struct domain *d, unsigned long dest_gpaddr,
- void *src, size_t size);
-void dom_fw_copy_from(void* dest, struct domain *d, unsigned long src_gpaddr,
- size_t size);
-
-#endif /* __ASM_XEN_IA64_DOM_FW_UTILS_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h
deleted file mode 100644
index 31d7d32a30..0000000000
--- a/xen/include/asm-ia64/domain.h
+++ /dev/null
@@ -1,367 +0,0 @@
-#ifndef __ASM_DOMAIN_H__
-#define __ASM_DOMAIN_H__
-
-#include <linux/thread_info.h>
-#include <asm/tlb.h>
-#include <asm/vmx_vpd.h>
-#include <asm/vmmu.h>
-#include <asm/regionreg.h>
-#include <public/xen.h>
-#include <asm/vmx_platform.h>
-#include <xen/list.h>
-#include <xen/cpumask.h>
-#include <xen/mm.h>
-#include <xen/hvm/irq.h>
-#include <asm/fpswa.h>
-#include <xen/rangeset.h>
-
-struct p2m_entry;
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
-struct tlb_track;
-#endif
-
-extern unsigned long volatile jiffies;
-
-struct vcpu;
-extern void relinquish_vcpu_resources(struct vcpu *v);
-extern int vcpu_late_initialise(struct vcpu *v);
-
-#define alloc_vcpu_guest_context() xmalloc(struct vcpu_guest_context)
-#define free_vcpu_guest_context(vgc) xfree(vgc)
-
-/* given a current domain metaphysical address, return the physical address */
-extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
- struct p2m_entry* entry);
-
-/* Set shared_info virtual address. */
-extern unsigned long domain_set_shared_info_va (unsigned long va);
-
-/* Flush cache of domain d.
- If sync_only is true, only synchronize I&D caches,
- if false, flush and invalidate caches. */
-extern void domain_cache_flush (struct domain *d, int sync_only);
-
-/* Control the shadow mode. */
-extern int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc);
-
-/* Cleanly crash the current domain with a message. */
-extern void panic_domain(struct pt_regs *, const char *, ...)
- __attribute__ ((noreturn, format (printf, 2, 3)));
-
-#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
-#define has_arch_mmios(d) (!rangeset_is_empty((d)->iomem_caps))
-
-struct mm_struct {
- volatile pgd_t * pgd;
- // atomic_t mm_users; /* How many users with user space? */
-};
-
-struct foreign_p2m {
- spinlock_t lock;
- /*
- * List sorted by entry->gpfn.
- * Only a small number of foreign-domain p2m mappings are expected
- * to exist at any given time.
- */
- struct list_head head;
-};
-
-struct last_vcpu {
-#define INVALID_VCPU_ID INT_MAX
- int vcpu_id;
-#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
- u32 tlbflush_timestamp;
-#endif
-} ____cacheline_aligned_in_smp;
-
-/* Data kept in domain memory for the SAL emulator. */
-struct xen_sal_data {
- /* OS boot rendez vous. */
- unsigned long boot_rdv_ip;
- unsigned long boot_rdv_r1;
-
- /* State for EFI_SET_VIRTUAL_ADDRESS_MAP emulation. */
- int efi_virt_mode; /* phys : 0 , virt : 1 */
-};
-
-/*
- * Optimization features let the hypervisor apply certain optimizations
- * on a guest's behalf.  They are switched off by default; a guest may
- * enable a feature via the __HYPERVISOR_opt_feature hypercall, and
- * domain builder code can also enable them via
- * XEN_DOMCTL_set_opt_feature.
- */
-
-/*
- * Helper struct for the different identity mapping optimizations.
- * The hypervisor inserts address translations into the TLB for
- * identity-mapped areas without reflecting the page fault to the
- * guest.
- */
-struct identity_mapping {
- unsigned long pgprot; /* The page protection bit mask of the pte.*/
- unsigned long key; /* A protection key. */
-};
-
-/* opt_feature mask */
-/*
- * If this feature is switched on, the hypervisor inserts the
- * TLB entries without calling the guest's trap handler.
- * This is useful for guests that use region 7 for identity mapping,
- * as the Linux kernel does.
- */
-#define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT 0
-#define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG \
- (1UL << XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT)
-
-/* Identity mapping of region 4 addresses in HVM. */
-#define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG_BIT \
- (XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT + 1)
-#define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG \
- (1UL << XEN_IA64_OPTF_IDENT_MAP_REG4_FLG_BIT)
-
-/* Identity mapping of region 5 addresses in HVM. */
-#define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG_BIT \
- (XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT + 2)
-#define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG \
- (1UL << XEN_IA64_OPTF_IDENT_MAP_REG5_FLG_BIT)
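For illustration, a PV guest would request one of these features through the __HYPERVISOR_opt_feature hypercall roughly as follows; the field and constant names are taken from public/arch-ia64.h and should be treated as assumptions here:

    struct xen_ia64_opt_feature optf = {
        .cmd    = XEN_IA64_OPTF_IDENT_MAP_REG7,   /* assumed command value */
        .on     = XEN_IA64_OPTF_ON,
        .pgprot = _PAGE_P | _PAGE_A | _PAGE_D | _PAGE_AR_RWX, /* pte bits,
                                                   Linux/ia64 names assumed */
        .key    = 0,                               /* protection key */
    };
    HYPERVISOR_opt_feature(&optf);   /* guest-side wrapper, assumed */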
-
-/* Central structure for optimization features used by the hypervisor. */
-struct opt_feature {
- unsigned long mask; /* For every feature one bit. */
- struct identity_mapping im_reg4; /* Region 4 identity mapping */
- struct identity_mapping im_reg5; /* Region 5 identity mapping */
- struct identity_mapping im_reg7; /* Region 7 identity mapping */
-};
-
-/* Set an optimization feature in the struct arch_domain. */
-extern int domain_opt_feature(struct domain *, struct xen_ia64_opt_feature*);
-
-struct arch_domain {
- struct mm_struct mm;
-
- /* Flags. */
- union {
- unsigned long flags;
- struct {
- unsigned int is_sioemu : 1;
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
- unsigned int has_pervcpu_vhpt : 1;
- unsigned int vhpt_size_log2 : 6;
-#endif
- };
- };
-
- /* maximum metaphysical address of conventional memory */
- u64 convmem_end;
-
- /* Allowed accesses to io ports. */
- struct rangeset *ioport_caps;
-
- /* There are two ranges of RID for a domain:
- one big range, used to virtualize domain RID,
- one small range for internal Xen use (metaphysical). */
- /* Big range. */
- unsigned int starting_rid; /* first RID assigned to domain */
- unsigned int ending_rid; /* one beyond highest RID assigned to domain */
- /* Metaphysical range. */
- unsigned int starting_mp_rid;
- unsigned int ending_mp_rid;
- /* RID for metaphysical mode. */
- unsigned int metaphysical_rid_dt; /* dt=it=0 */
- unsigned int metaphysical_rid_d; /* dt=0, it=1 */
-
- unsigned char rid_bits; /* number of virtual rid bits (default: 18) */
- int breakimm; /* The imm value for hypercalls. */
-
- struct list_head pdev_list;
- struct virtual_platform_def vmx_platform;
-#define hvm_domain vmx_platform /* platform defs are not vmx specific */
-
- u64 shared_info_va;
-
- /* Address of SAL emulator data */
- struct xen_sal_data *sal_data;
-
- /* Shared page for notifying that explicit PIRQ EOI is required. */
- unsigned long *pirq_eoi_map;
- unsigned long pirq_eoi_map_mfn;
- /* set auto_unmask to 1 if you want PHYSDEVOP_eoi to automatically
- * unmask the event channel */
- bool_t auto_unmask;
-
- /* Address of efi_runtime_services_t (placed in domain memory) */
- void *efi_runtime;
- /* Address of fpswa_interface_t (placed in domain memory) */
- void *fpswa_inf;
-
- /* Bitmap of shadow dirty bits.
- Set iff shadow mode is enabled. */
- u64 *shadow_bitmap;
- /* Length (in bits!) of shadow bitmap. */
- unsigned long shadow_bitmap_size;
- /* Number of bits set in bitmap. */
- atomic64_t shadow_dirty_count;
- /* Number of faults. */
- atomic64_t shadow_fault_count;
-
- /* for foreign domain p2m table mapping */
- struct foreign_p2m foreign_p2m;
-
- struct last_vcpu last_vcpu[NR_CPUS];
-
- struct opt_feature opt_feature;
-
- /* Debugging flags.  See arch-ia64.h for the bit definitions. */
- unsigned int debug_flags;
-
- /* Reason for the debug break. */
- unsigned int debug_event;
-
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
- struct tlb_track* tlb_track;
-#endif
-
- /* for domctl_destroy_domain continuation */
- enum {
- RELRES_not_started,
- RELRES_mm_teardown,
- RELRES_xen,
- RELRES_dom,
- RELRES_done,
- } relres;
- /* Continuable mm_teardown() */
- unsigned long mm_teardown_offset;
- /* Continuable domain_relinquish_resources() */
- struct page_list_head relmem_list;
-};
-#define INT_ENABLE_OFFSET(v) \
- (sizeof(vcpu_info_t) * (v)->vcpu_id + \
- offsetof(vcpu_info_t, evtchn_upcall_mask))
-
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
-#define HAS_PERVCPU_VHPT(d) ((d)->arch.has_pervcpu_vhpt)
-#else
-#define HAS_PERVCPU_VHPT(d) (0)
-#endif
-
-
-struct arch_vcpu {
- /* Saved vcpu state.
- Kept as the first member to speed up accesses. */
- mapped_regs_t *privregs;
-
- /* TR and TC. */
- TR_ENTRY itrs[NITRS];
- TR_ENTRY dtrs[NDTRS];
- TR_ENTRY itlb;
- TR_ENTRY dtlb;
-
- /* Bit is set if there is a tr/tc for the region. */
- unsigned char itr_regions;
- unsigned char dtr_regions;
- unsigned char tc_regions;
-
- unsigned long irr[4]; /* Interrupt request register. */
- unsigned long insvc[4]; /* Interrupt in service. */
- unsigned long iva;
- unsigned long domain_itm;
- unsigned long domain_itm_last;
-
- unsigned long event_callback_ip; // event callback handler
- unsigned long failsafe_callback_ip; // Do we need it?
-
- /* These fields are copied from arch_domain to make access easier/faster
- in assembly code. */
- unsigned long metaphysical_rid_dt; // from arch_domain (so is pinned)
- unsigned long metaphysical_rid_d; // from arch_domain (so is pinned)
- unsigned long metaphysical_saved_rr0; // from arch_domain (so is pinned)
- unsigned long metaphysical_saved_rr4; // from arch_domain (so is pinned)
- unsigned long fp_psr; // used for lazy float register
- u64 *shadow_bitmap; // from arch_domain (so is pinned)
- int breakimm; // from arch_domain (so is pinned)
- int starting_rid; /* first RID assigned to domain */
- int ending_rid; /* one beyond highest RID assigned to domain */
- unsigned char rid_bits; // from arch_domain (so is pinned)
-
- /* Bitset for debug register use. */
- unsigned int dbg_used;
- u64 dbr[IA64_NUM_DBG_REGS];
- u64 ibr[IA64_NUM_DBG_REGS];
-
- struct thread_struct _thread; // this must be last
-
- thash_cb_t vtlb;
- thash_cb_t vhpt;
- char irq_new_pending;
- char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
- char hypercall_continuation;
-
- fpswa_ret_t fpswa_ret; /* save return values of FPSWA emulation */
- struct timer hlt_timer;
- struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
-
- /* This vector holds the protection keys for pkr emulation of PV domains.
- * Currently only 15 registers are usable by domUs; pkr[15] is
- * reserved for the hypervisor. */
- unsigned long pkrs[XEN_IA64_NPKRS+1]; /* protection key registers */
-#define XEN_IA64_PKR_IN_USE 0x1 /* If psr.pk = 1 was set. */
- unsigned char pkr_flags;
-
- unsigned char vhpt_pg_shift; /* PAGE_SHIFT or less */
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
- PTA pta;
- unsigned long vhpt_maddr;
- struct page_info* vhpt_page;
- unsigned long vhpt_entries;
-#endif
-#define INVALID_PROCESSOR INT_MAX
- int last_processor;
- cpumask_t cache_coherent_map;
-};
-
-#define pirq_dpci(pirq) ((pirq) ? &(pirq)->arch.dpci : NULL)
-#define dpci_pirq(dp) container_of(dp, struct pirq, arch.dpci)
-
-#define alloc_pirq_struct(d) ({ \
- struct pirq *pirq = xmalloc(struct pirq); \
- if ( pirq ) \
- { \
- memset(pirq, 0, sizeof(*pirq)); \
- pt_pirq_init(d, &pirq->arch.dpci); \
- } \
- pirq; \
-})
-
-#include <asm/uaccess.h> /* for KERNEL_DS */
-#include <asm/pgtable.h>
-
-int
-do_perfmon_op(unsigned long cmd,
- XEN_GUEST_HANDLE(void) arg1, unsigned long arg2);
-
-void
-ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
- unsigned long iim, unsigned long itir, unsigned long arg5,
- unsigned long arg6, unsigned long arg7, unsigned long stack);
-
-void
-ia64_lazy_load_fpu(struct vcpu *vcpu);
-
-int construct_dom0(
- struct domain *d,
- unsigned long image_start, unsigned long image_len,
- unsigned long initrd_start, unsigned long initrd_len,
- char *cmdline);
-
-#endif /* __ASM_DOMAIN_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-ia64/elf.h b/xen/include/asm-ia64/elf.h
deleted file mode 100644
index f8c1e74287..0000000000
--- a/xen/include/asm-ia64/elf.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef __IA64_ELF_H__
-#define __IA64_ELF_H__
-
-typedef struct {
- unsigned long r1;
- unsigned long r2;
- unsigned long r13;
- unsigned long cr_iip;
- unsigned long ar_rsc;
- unsigned long r30;
- unsigned long ar_bspstore;
- unsigned long ar_rnat;
- unsigned long ar_ccv;
- unsigned long ar_unat;
- unsigned long ar_pfs;
- unsigned long r31;
- unsigned long ar_csd;
- unsigned long ar_ssd;
-} ELF_Gregset;
-
-/*
- * elf_gregset_t contains the application-level state in the following order:
- * r0-r31
- * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
- * predicate registers (p0-p63)
- * b0-b7
- * ip cfm psr
- * ar.rsc ar.bsp ar.bspstore ar.rnat
- * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
- */
-#define ELF_NGREG 128 /* we really need just 72,
- * but let's leave some headroom */
-
-#define ALIGN_UP(addr, size) (((addr) + ((size) - 1)) & (~((size) - 1)))
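A quick worked example of ALIGN_UP (size must be a power of two):

    /* ALIGN_UP(0x1003, 8) == (0x1003 + 7) & ~7 == 0x1008 */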
-
-typedef unsigned long elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-typedef elf_gregset_t crash_xen_core_t;
-
-extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
-
-static inline void elf_core_save_regs(ELF_Gregset *core_regs,
- crash_xen_core_t *xen_core_regs)
-{
- elf_greg_t *aligned_xen_core_regs;
-
- /*
- * Re-align xen_core_regs to 64 bits to avoid unaligned-access faults,
- * then memmove the result back in place.
- * xen_core_regs has headroom, so this is safe.
- */
- aligned_xen_core_regs = (elf_greg_t *)ALIGN_UP((unsigned long)
- *xen_core_regs, 8);
- ia64_elf_core_copy_regs(NULL, aligned_xen_core_regs);
- memmove(*xen_core_regs, aligned_xen_core_regs, sizeof(crash_xen_core_t));
-}
-
-#endif /* __IA64_ELF_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-ia64/event.h b/xen/include/asm-ia64/event.h
deleted file mode 100644
index 4463cb3286..0000000000
--- a/xen/include/asm-ia64/event.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/******************************************************************************
- * event.h
- *
- * A nice interface for passing asynchronous events to guest OSes.
- * (architecture-dependent part)
- *
- */
-
-#ifndef __ASM_EVENT_H__
-#define __ASM_EVENT_H__
-
-#include <public/xen.h>
-#include <asm/vcpu.h>
-
-static inline void vcpu_kick(struct vcpu *v)
-{
- /*
- * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
- * pending flag. These values may fluctuate (after all, we hold no
- * locks) but the key insight is that each change will cause
- * evtchn_upcall_pending to be polled.
- *
- * NB2. We save the running flag across the unblock to avoid a needless
- * IPI for domains that we IPI'd to unblock.
- */
- int running = v->is_running;
- vcpu_unblock(v);
- if ( running )
- smp_send_event_check_cpu(v->processor);
-
- if ( !VMX_DOMAIN(v) && !v->arch.event_callback_ip )
- vcpu_pend_interrupt(v, v->domain->shared_info->arch.evtchn_vector);
-}
-
-static inline void vcpu_mark_events_pending(struct vcpu *v)
-{
- if ( !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
- vcpu_kick(v);
-}
-
-/* Note: Bitwise operations result in fast code with no branches. */
-#define event_pending(v) \
- (!!(v)->vcpu_info->evtchn_upcall_pending & \
- !(v)->vcpu_info->evtchn_upcall_mask)
-
-static inline int local_events_need_delivery(void)
-{
- return event_pending(current);
-}
-
-static inline int local_event_delivery_is_enabled(void)
-{
- return !current->vcpu_info->evtchn_upcall_mask;
-}
-
-static inline void local_event_delivery_disable(void)
-{
- current->vcpu_info->evtchn_upcall_mask = 1;
-}
-
-static inline void local_event_delivery_enable(void)
-{
- current->vcpu_info->evtchn_upcall_mask = 0;
-}
-
-static inline int arch_virq_is_global(uint32_t virq)
-{
- int rc;
-
- switch ( virq )
- {
- case VIRQ_ITC:
- case VIRQ_MCA_CMC:
- case VIRQ_MCA_CPE:
- rc = 0;
- break;
- default:
- rc = 1;
- break;
- }
-
- return rc;
-}
-
-#endif
diff --git a/xen/include/asm-ia64/flushtlb.h b/xen/include/asm-ia64/flushtlb.h
deleted file mode 100644
index b470ac1615..0000000000
--- a/xen/include/asm-ia64/flushtlb.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/******************************************************************************
- * flushtlb.h
- * based on x86 flushtlb.h
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_FLUSHTLB_H__
-#define __ASM_FLUSHTLB_H__
-
-#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
-
-#include <xen/percpu.h>
-
-extern volatile u32 tlbflush_clock;
-#define tlbflush_current_time() tlbflush_clock
-
-u32 tlbflush_clock_inc_and_return(void);
-void new_tlbflush_clock_period(void);
-
-static inline void
-tlbflush_update_time(volatile u32* time, u32 timestamp)
-{
- /*
- * This should be ld4.acq + st4.rel, but it only has release semantics,
- * so this function cannot be considered a memory barrier.
- */
- *time = timestamp;
-}
-
-/*
- * taken from x86's NEED_FLUSH()
- * obj_stamp: mTLB time stamp, per pcpu VHPT stamp, per vcpu VHPT stamp.
- */
-static inline int
-NEED_FLUSH(u32 obj_stamp, u32 lastuse_stamp)
-{
- u32 curr_time = tlbflush_current_time();
- /*
- * Two cases:
- * 1. During a wrap, the clock ticks over to 0 while CPUs catch up. For
- * safety during this period, we force a flush if @curr_time == 0.
- * 2. Otherwise, we look to see if @obj_stamp <= @lastuse_stamp.
- * To detect false positives because @obj_stamp has wrapped, we
- * also check @curr_time: if it is less than @lastuse_stamp we have
- * definitely wrapped, so no flush is needed (one is forced every wrap).
- */
- return ((curr_time == 0) ||
- ((obj_stamp <= lastuse_stamp) && (lastuse_stamp <= curr_time)));
-}
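Illustrative use of NEED_FLUSH, with a hypothetical caller comparing the per-cpu VHPT stamp declared just below against an entry's last-use stamp:

    /* Flush only when the entry may still be live per the clock logic above. */
    if ( NEED_FLUSH(__get_cpu_var(vhpt_tlbflush_timestamp), lastuse_stamp) )
        local_vhpt_flush();   /* flush routine name assumed */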
-
-DECLARE_PER_CPU(volatile u32, tlbflush_time);
-DECLARE_PER_CPU(volatile u32, vhpt_tlbflush_timestamp);
-
-#else
-
-#define tlbflush_current_time() (0)
-#define tlbflush_clock_inc_and_return() (0)
-#define tlbflush_update_time(time, timestamp) do {(void)timestamp;} while (0)
-#define NEED_FLUSH(obj_stamp, lastuse_stamp) (1)
-
-#endif /* CONFIG_XEN_IA64_TLBFLUSH_CLOCK */
-
-#endif /* __ASM_FLUSHTLB_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-ia64/grant_table.h b/xen/include/asm-ia64/grant_table.h
deleted file mode 100644
index 27ee71b2af..0000000000
--- a/xen/include/asm-ia64/grant_table.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/******************************************************************************
- * include/asm-ia64/grant_table.h
- */
-
-#ifndef __ASM_GRANT_TABLE_H__
-#define __ASM_GRANT_TABLE_H__
-
-#include <asm/intrinsics.h>
-
-#define INITIAL_NR_GRANT_FRAMES 1
-
-// for grant map/unmap
-int create_grant_host_mapping(unsigned long gpaddr, unsigned long mfn,
- unsigned int flags, unsigned int cache_flags);
-int replace_grant_host_mapping(unsigned long gpaddr, unsigned long mfn, unsigned long new_gpaddr, unsigned int flags);
-
-// for grant transfer
-int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn, unsigned int page_order);
-
-/* XXX
- * These constants shouldn't be pre-defined here; the areas should
- * instead be reserved by an EFI memory descriptor somewhere
- * appropriate.
- */
-/* Guest physical address of shared_info */
-#define IA64_SHARED_INFO_PADDR (1UL << 40)
-/* Guest physical address of mapped_regs */
-#define IA64_XMAPPEDREGS_BASE_PADDR (IA64_SHARED_INFO_PADDR + XSI_SIZE)
-#define IA64_XMAPPEDREGS_PADDR(vcpu_id) \
- (IA64_XMAPPEDREGS_BASE_PADDR + \
- (vcpu_id) * max_t(unsigned long, PAGE_SIZE, XMAPPEDREGS_SIZE))
-
-/* Guest physical address of the grant table. */
-#define IA64_GRANT_TABLE_PADDR IA64_XMAPPEDREGS_PADDR(NR_CPUS)
-
-#define gnttab_shared_maddr(t, i) (virt_to_maddr((t)->shared_raw[(i)]))
-#define gnttab_shared_page(t, i) (virt_to_page((t)->shared_raw[(i)]))
-
-#define gnttab_status_maddr(t, i) (virt_to_maddr((t)->status[(i)]))
-#define gnttab_status_mfn(t, i) (virt_to_maddr((t)->status[(i)]) >> PAGE_SHIFT)
-#define gnttab_status_page(t, i) (virt_to_page((t)->status[(i)]))
-
-#define ia64_gnttab_create_shared_page(d, t, i) \
- do { \
- BUG_ON((d)->arch.mm.pgd == NULL); \
- assign_domain_page((d), \
- IA64_GRANT_TABLE_PADDR + ((i) << PAGE_SHIFT), \
- gnttab_shared_maddr((t), (i))); \
- } while (0)
-
-/*
- * For the grant table shared page:
- * grant_table_create() might call this macro before the p2m table is
- * allocated.  In that case, arch_domain_create() completes the
- * initialization.
- */
-#define gnttab_create_shared_page(d, t, i) \
- do { \
- share_xen_page_with_guest(gnttab_shared_page((t), (i)), \
- (d), XENSHARE_writable); \
- if ((d)->arch.mm.pgd) \
- ia64_gnttab_create_shared_page((d), (t), (i)); \
- } while (0)
-
-#define ia64_gnttab_create_status_page(d, t, i) \
- do { \
- BUG_ON((d)->arch.mm.pgd == NULL); \
- assign_domain_page((d), \
- IA64_GRANT_TABLE_PADDR + ((i) << PAGE_SHIFT), \
- gnttab_status_maddr((t), (i))); \
- } while (0)
-
-#define gnttab_create_status_page(d, t, i) \
- do { \
- share_xen_page_with_guest(gnttab_status_page((t), (i)), \
- (d), XENSHARE_writable); \
- if ((d)->arch.mm.pgd) \
- ia64_gnttab_create_status_page((d), (t), (i)); \
- } while (0)
-
-#define gnttab_shared_gmfn(d, t, i) \
- ((IA64_GRANT_TABLE_PADDR >> PAGE_SHIFT) + (i))
-#define gnttab_status_gmfn(d, t, i) \
- (mfn_to_gmfn(d, gnttab_status_mfn(t, i)))
-
-#define gnttab_mark_dirty(d, f) ((void)f)
-
-static inline void gnttab_clear_flag(unsigned int nr, volatile uint16_t *st)
-{
- /*
- * Note that this cannot be clear_bit(), as the access must be
- * confined to the specified 2 bytes.
- */
- uint16_t mask = ~(1 << nr), old;
- CMPXCHG_BUGCHECK_DECL
-
- do {
- CMPXCHG_BUGCHECK(st);
- old = *st;
- } while (cmpxchg_rel(st, old, old & mask) != old);
-}
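For illustration, a caller clears a grant's status bit with gnttab_clear_flag so that the read-modify-write stays within the 16-bit flags word; the shared_entry_v1() accessor is assumed from common grant-table code:

    /* Drop the 'guest is writing' bit of a v1 shared entry's flags. */
    gnttab_clear_flag(_GTF_writing, &shared_entry_v1(gt, ref).flags);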
-
-#define gnttab_host_mapping_get_page_type(op, ld, rd) \
- (!((op)->flags & GNTMAP_readonly))
-
-#define gnttab_release_host_mappings(domain) 1
-
-static inline int replace_grant_supported(void)
-{
- return 1;
-}
-
-#endif /* __ASM_GRANT_TABLE_H__ */
diff --git a/xen/include/asm-ia64/guest_access.h b/xen/include/asm-ia64/guest_access.h
deleted file mode 100644
index b26a392cb3..0000000000
--- a/xen/include/asm-ia64/guest_access.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- * Tristan Gingold <tristan.gingold@bull.net>
- */
-
-#ifndef __ASM_GUEST_ACCESS_H__
-#define __ASM_GUEST_ACCESS_H__
-
-#include <xen/types.h> /* arch-ia64.h which is included by xen.h
- requires uint64_t */
-#include <public/xen.h> /* for XENCOMM_INLINE_FLAG */
-#include <xen/xencomm.h>
-
-#endif /* __ASM_GUEST_ACCESS_H__ */
diff --git a/xen/include/asm-ia64/hardirq.h b/xen/include/asm-ia64/hardirq.h
deleted file mode 100644
index 28c508fbb7..0000000000
--- a/xen/include/asm-ia64/hardirq.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __IA64__HARDIRQ__H__
-#define __IA64__HARDIRQ__H__
-
-#define __ARCH_IRQ_STAT 1
-#define HARDIRQ_BITS 14
-#include <linux/hardirq.h>
-#include <xen/sched.h>
-
-#define local_softirq_pending() (local_cpu_data->softirq_pending)
-
-#endif
diff --git a/xen/include/asm-ia64/hvm/iommu.h b/xen/include/asm-ia64/hvm/iommu.h
deleted file mode 100644
index f5b70fee1a..0000000000
--- a/xen/include/asm-ia64/hvm/iommu.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __ASM_IA64_HVM_IOMMU_H__
-#define __ASM_IA64_HVM_IOMMU_H__
-
-#include <asm/hvm/irq.h>
-#include <public/event_channel.h>
-#include <public/arch-ia64/hvm/save.h>
-#include <asm/hw_irq.h>
-#include <asm/iosapic.h>
-
-struct iommu_ops;
-extern const struct iommu_ops intel_iommu_ops;
-extern int intel_vtd_setup(void);
-
-#define iommu_get_ops() (&intel_iommu_ops)
-#define iommu_hardware_setup() (intel_vtd_setup())
-
-static inline int domain_irq_to_vector(struct domain *d, int irq)
-{
- return irq;
-}
-
-static inline void ack_APIC_irq(void)
-{
- /* TODO */
-}
-
-static inline void pci_cleanup_msi(struct pci_dev *pdev)
-{
- /* TODO */
-}
-
-
-extern int assign_irq_vector (int irq);
-
-#endif /* __ASM_IA64_HVM_IOMMU_H__ */
diff --git a/xen/include/asm-ia64/hvm/irq.h b/xen/include/asm-ia64/hvm/irq.h
deleted file mode 100644
index 1e26ab76b4..0000000000
--- a/xen/include/asm-ia64/hvm/irq.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/******************************************************************************
- * irq.h
- *
- * Interrupt distribution and delivery logic.
- *
- * Copyright (c) 2006, K A Fraser, XenSource Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef __ASM_IA64_HVM_IRQ_H__
-#define __ASM_IA64_HVM_IRQ_H__
-
-#include <asm/irq.h>
-
-#define VIOAPIC_NUM_PINS 48
-
-#include <xen/hvm/irq.h>
-
-struct hvm_hw_pci_irqs {
- /*
- * Virtual interrupt wires for a single PCI bus.
- * Indexed by: device*4 + INTx#.
- */
- union {
- DECLARE_BITMAP(i, 32*4);
- uint64_t pad[2];
- };
-};
-
-struct hvm_irq {
- /*
- * Virtual interrupt wires for a single PCI bus.
- * Indexed by: device*4 + INTx#.
- */
- struct hvm_hw_pci_irqs pci_intx;
-
- /* Virtual interrupt and via-link for paravirtual platform driver. */
- uint32_t callback_via_asserted;
- union {
- enum {
- HVMIRQ_callback_none,
- HVMIRQ_callback_gsi,
- HVMIRQ_callback_pci_intx
- } callback_via_type;
- };
- union {
- uint32_t gsi;
- struct { uint8_t dev, intx; } pci;
- } callback_via;
-
- /*
- * Number of wires asserting each GSI.
- *
- * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space
- * except ISA IRQ 0, which is connected to GSI 2.
- * PCI links map into this space via the PCI-ISA bridge.
- *
- * GSIs 16+ are used only by PCI devices.  The mapping from PCI device to
- * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16
- */
- u8 gsi_assert_count[VIOAPIC_NUM_PINS];
-
- /*
- * GSIs map onto PIC/IO-APIC in the usual way:
- * 0-7: Master 8259 PIC, IO-APIC pins 0-7
- * 8-15: Slave 8259 PIC, IO-APIC pins 8-15
- * 16+ : IO-APIC pins 16+
- */
-
- /* Last VCPU that was delivered a LowestPrio interrupt. */
- u8 round_robin_prev_vcpu;
-
- struct hvm_irq_dpci *dpci;
-};
-
-#define hvm_pci_intx_gsi(dev, intx) \
- (((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16)
-#define hvm_pci_intx_link(dev, intx) \
- (((dev) + (intx)) & 3)
-
-#define IA64_INVALID_VECTOR ((unsigned int)((int)-1))
-static inline unsigned int irq_to_vector(int irq)
-{
- int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
- unsigned int vector;
-
- if ( acpi_gsi_to_irq(irq, &vector) < 0)
- return 0;
-
- return vector;
-}
-
-extern u8 irq_vector[NR_IRQS];
-extern int vector_irq[NR_VECTORS];
-
-#endif /* __ASM_IA64_HVM_IRQ_H__ */
diff --git a/xen/include/asm-ia64/hvm/support.h b/xen/include/asm-ia64/hvm/support.h
deleted file mode 100644
index fcbd3d2cb6..0000000000
--- a/xen/include/asm-ia64/hvm/support.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * xen/include/asm-ia64/hvm/save.h
- *
- * Copyright (c) 2007, Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * IA64 support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ASM_IA64_HVM_SUPPORT_H__
-#define __ASM_IA64_HVM_SUPPORT_H__
-
-#include <xen/hvm/save.h>
-
-static inline int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest,
- uint8_t dest_mode)
-{
- /* TODO */
- return -ENOSYS;
-}
-
-static inline void hvm_migrate_pirqs(struct vcpu *v)
-{
- /* TODO */
-}
-
-#endif /* __ASM_IA64_HVM_SUPPORT_H__ */
diff --git a/xen/include/asm-ia64/hvm/vacpi.h b/xen/include/asm-ia64/hvm/vacpi.h
deleted file mode 100644
index 764aad3e8a..0000000000
--- a/xen/include/asm-ia64/hvm/vacpi.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * vacpi.h: Virtual ACPI definitions
- *
- * Copyright (c) 2007, FUJITSU LIMITED
- * Kouya Shimura <kouya at jp fujitsu com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef __ASM_IA64_HVM_VACPI_H__
-#define __ASM_IA64_HVM_VACPI_H__
-
-#include <public/arch-ia64/hvm/save.h> /* for struct vacpi_regs */
-#include <public/hvm/ioreq.h>
-
-#define IS_ACPI_ADDR(X) ((unsigned long)((X)-ACPI_PM1A_EVT_BLK_ADDRESS)<12)
-
-#define FREQUENCE_PMTIMER 3579545UL /* Timer should run at 3.579545 MHz */
-
-struct vacpi {
- struct vacpi_regs regs;
- s_time_t last_gtime;
- struct timer timer;
- spinlock_t lock;
-};
-
-int vacpi_intercept(ioreq_t * p, u64 * val);
-void vacpi_init(struct domain *d);
-void vacpi_relinquish_resources(struct domain *d);
-
-#endif /* __ASM_IA64_HVM_VACPI_H__ */
diff --git a/xen/include/asm-ia64/hvm/vlapic.h b/xen/include/asm-ia64/hvm/vlapic.h
deleted file mode 100644
index 7194bcf082..0000000000
--- a/xen/include/asm-ia64/hvm/vlapic.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASM_IA64_HVM_VLAPIC_H__
-#define __ASM_IA64_HVM_VLAPIC_H__
-
-#endif /* __ASM_IA64_HVM_VLAPIC_H__ */
diff --git a/xen/include/asm-ia64/hypercall.h b/xen/include/asm-ia64/hypercall.h
deleted file mode 100644
index 37846bc535..0000000000
--- a/xen/include/asm-ia64/hypercall.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/******************************************************************************
- * asm-ia64/hypercall.h
- */
-
-#ifndef __ASM_IA64_HYPERCALL_H__
-#define __ASM_IA64_HYPERCALL_H__
-
-#include <public/xen.h>
-#include <asm/types.h>
-#include <asm/vcpu.h>
-
-extern long
-do_event_channel_op_compat(
- XEN_GUEST_HANDLE(evtchn_op_t) uop);
-
-extern long do_pirq_guest_eoi(int pirq);
-
-extern int
-vmx_do_mmu_update(
- mmu_update_t *ureqs,
- u64 count,
- u64 *pdone,
- u64 foreigndom);
-
-#endif /* __ASM_IA64_HYPERCALL_H__ */
diff --git a/xen/include/asm-ia64/ia64_int.h b/xen/include/asm-ia64/ia64_int.h
deleted file mode 100644
index 711078104b..0000000000
--- a/xen/include/asm-ia64/ia64_int.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef _ASM_IA64_INT_H
-#define _ASM_IA64_INT_H
-
-//#include "ia64.h"
-
-#define IA64_VHPT_TRANS_VECTOR 0x0000
-#define IA64_INST_TLB_VECTOR 0x0400
-#define IA64_DATA_TLB_VECTOR 0x0800
-#define IA64_ALT_INST_TLB_VECTOR 0x0c00
-#define IA64_ALT_DATA_TLB_VECTOR 0x1000
-#define IA64_DATA_NESTED_TLB_VECTOR 0x1400
-#define IA64_INST_KEY_MISS_VECTOR 0x1800
-#define IA64_DATA_KEY_MISS_VECTOR 0x1c00
-#define IA64_DIRTY_BIT_VECTOR 0x2000
-#define IA64_INST_ACCESS_BIT_VECTOR 0x2400
-#define IA64_DATA_ACCESS_BIT_VECTOR 0x2800
-#define IA64_BREAK_VECTOR 0x2c00
-#define IA64_EXTINT_VECTOR 0x3000
-#define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000
-#define IA64_KEY_PERMISSION_VECTOR 0x5100
-#define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200
-#define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300
-#define IA64_GENEX_VECTOR 0x5400
-#define IA64_DISABLED_FPREG_VECTOR 0x5500
-#define IA64_NAT_CONSUMPTION_VECTOR 0x5600
-#define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */
-#define IA64_DEBUG_VECTOR 0x5900
-#define IA64_UNALIGNED_REF_VECTOR 0x5a00
-#define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00
-#define IA64_FP_FAULT_VECTOR 0x5c00
-#define IA64_FP_TRAP_VECTOR 0x5d00
-#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00
-#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00
-#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000
-
-#define IA64_NO_FAULT 0x0000
-#define IA64_FAULT 0x0001
-#define IA64_RFI_IN_PROGRESS 0x0002
-// To avoid conflicting with return value of handle_fpu_swa()
-// set IA64_RETRY to -0x000f
-#define IA64_RETRY (-0x000f)
-#define IA64_FORCED_IFA 0x0004
-#define IA64_USE_TLB 0x0005
-#define IA64_ILLOP_FAULT (IA64_GENEX_VECTOR | 0x00)
-#define IA64_PRIVOP_FAULT (IA64_GENEX_VECTOR | 0x10)
-#define IA64_PRIVREG_FAULT (IA64_GENEX_VECTOR | 0x20)
-#define IA64_RSVDREG_FAULT (IA64_GENEX_VECTOR | 0x30)
-#define IA64_DISIST_FAULT (IA64_GENEX_VECTOR | 0x40)
-#define IA64_ILLDEP_FAULT (IA64_GENEX_VECTOR | 0x80)
-#define IA64_DTLB_FAULT (IA64_DATA_TLB_VECTOR)
-#define IA64_VHPT_FAULT (IA64_VHPT_TRANS_VECTOR | 0x7)
-#if !defined(__ASSEMBLY__)
-typedef unsigned long IA64FAULT;
-typedef unsigned long IA64INTVECTOR;
-#endif /* !ASSEMBLY */
-#endif
diff --git a/xen/include/asm-ia64/init.h b/xen/include/asm-ia64/init.h
deleted file mode 100644
index 5295b35e63..0000000000
--- a/xen/include/asm-ia64/init.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _XEN_ASM_INIT_H
-#define _XEN_ASM_INIT_H
-
-#endif /* _XEN_ASM_INIT_H */
diff --git a/xen/include/asm-ia64/iocap.h b/xen/include/asm-ia64/iocap.h
deleted file mode 100644
index ae7a871e0c..0000000000
--- a/xen/include/asm-ia64/iocap.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/******************************************************************************
- * iocap.h
- *
- * Architecture-specific per-domain I/O capabilities.
- */
-
-#ifndef __IA64_IOCAP_H__
-#define __IA64_IOCAP_H__
-
-extern int ioports_permit_access(struct domain *d, unsigned int gs,
- unsigned int s, unsigned int e);
-extern int ioports_deny_access(struct domain *d,
- unsigned int s, unsigned int e);
-
-#define ioports_access_permitted(d, s, e) \
- rangeset_contains_range((d)->arch.ioport_caps, s, e)
-
-#define multipage_allocation_permitted(d, order) \
- (((order) == 0) || \
- !rangeset_is_empty((d)->iomem_caps) || \
- !rangeset_is_empty((d)->arch.ioport_caps))
-
-#endif /* __IA64_IOCAP_H__ */
diff --git a/xen/include/asm-ia64/kexec.h b/xen/include/asm-ia64/kexec.h
deleted file mode 100644
index 1072d65f6a..0000000000
--- a/xen/include/asm-ia64/kexec.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __IA64_KEXEC_H__
-#define __IA64_KEXEC_H__
-
-#include <xen/types.h>
-#include <xen/kexec.h>
-
-struct rsvd_region;
-
-extern const unsigned int relocate_new_kernel_size;
-extern void relocate_new_kernel(unsigned long indirection_page,
- unsigned long start_address,
- unsigned long boot_param,
- unsigned long dom0_relocate_new_kernel);
-void crash_save_xen_notes(void);
-void kexec_disable_iosapic(void);
-void machine_kexec(xen_kexec_image_t *image);
-unsigned long kdump_find_rsvd_region(unsigned long size,
- struct rsvd_region *rsvd_regions, int n);
-
-#endif /* __IA64_KEXEC_H__ */
diff --git a/xen/include/asm-ia64/linux-null/README.origin b/xen/include/asm-ia64/linux-null/README.origin
deleted file mode 100644
index 7432eb7d2d..0000000000
--- a/xen/include/asm-ia64/linux-null/README.origin
+++ /dev/null
@@ -1,3 +0,0 @@
-Files in this directory (and subdirectories) are intentionally left blank.
-The sole purpose of this is to avoid removing/ifdefing include lines
-from various source files.
diff --git a/xen/include/asm-ia64/linux-null/asm/cyclone.h b/xen/include/asm-ia64/linux-null/asm/cyclone.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/cyclone.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/desc.h b/xen/include/asm-ia64/linux-null/asm/desc.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/desc.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/ia32.h b/xen/include/asm-ia64/linux-null/asm/ia32.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/ia32.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/mman.h b/xen/include/asm-ia64/linux-null/asm/mman.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/mmzone.h b/xen/include/asm-ia64/linux-null/asm/mmzone.h
deleted file mode 100644
index 81447b2f74..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/mmzone.h
+++ /dev/null
@@ -1 +0,0 @@
-/* Empty file. */
diff --git a/xen/include/asm-ia64/linux-null/asm/module.h b/xen/include/asm-ia64/linux-null/asm/module.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/module.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/nmi.h b/xen/include/asm-ia64/linux-null/asm/nmi.h
deleted file mode 100644
index 8463250968..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/nmi.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __IA64_NMI_H__
-#define __IA64_NMI_H__
-
-#define register_guest_nmi_callback(a) (-ENOSYS)
-#define unregister_guest_nmi_callback() (-ENOSYS)
-
-#endif /* __IA64_NMI_H__ */
diff --git a/xen/include/asm-ia64/linux-null/asm/pdb.h b/xen/include/asm-ia64/linux-null/asm/pdb.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/pdb.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/ptrace_offsets.h b/xen/include/asm-ia64/linux-null/asm/ptrace_offsets.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/ptrace_offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/scatterlist.h b/xen/include/asm-ia64/linux-null/asm/scatterlist.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/scatterlist.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/semaphore.h b/xen/include/asm-ia64/linux-null/asm/semaphore.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/serial.h b/xen/include/asm-ia64/linux-null/asm/serial.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/signal.h b/xen/include/asm-ia64/linux-null/asm/signal.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/signal.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/sn/arch.h b/xen/include/asm-ia64/linux-null/asm/sn/arch.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/sn/arch.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/sn/geo.h b/xen/include/asm-ia64/linux-null/asm/sn/geo.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/sn/geo.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/sn/nodepda.h b/xen/include/asm-ia64/linux-null/asm/sn/nodepda.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/sn/nodepda.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/sn/sn_cpuid.h b/xen/include/asm-ia64/linux-null/asm/sn/sn_cpuid.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/sn/sn_cpuid.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/ustack.h b/xen/include/asm-ia64/linux-null/asm/ustack.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/ustack.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/asm/xen/hypervisor.h b/xen/include/asm-ia64/linux-null/asm/xen/hypervisor.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/asm/xen/hypervisor.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/bootmem.h b/xen/include/asm-ia64/linux-null/linux/bootmem.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/bootmem.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/capability.h b/xen/include/asm-ia64/linux-null/linux/capability.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/capability.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/completion.h b/xen/include/asm-ia64/linux-null/linux/completion.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/completion.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/device.h b/xen/include/asm-ia64/linux-null/linux/device.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/device.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/dmapool.h b/xen/include/asm-ia64/linux-null/linux/dmapool.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/dmapool.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/file.h b/xen/include/asm-ia64/linux-null/linux/file.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/file.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/kallsyms.h b/xen/include/asm-ia64/linux-null/linux/kallsyms.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/kallsyms.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/kernel_stat.h b/xen/include/asm-ia64/linux-null/linux/kernel_stat.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/kernel_stat.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/mmzone.h b/xen/include/asm-ia64/linux-null/linux/mmzone.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/mmzone.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/module.h b/xen/include/asm-ia64/linux-null/linux/module.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/module.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/mount.h b/xen/include/asm-ia64/linux-null/linux/mount.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/mount.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/node.h b/xen/include/asm-ia64/linux-null/linux/node.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/node.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/page-flags.h b/xen/include/asm-ia64/linux-null/linux/page-flags.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/page-flags.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/pagemap.h b/xen/include/asm-ia64/linux-null/linux/pagemap.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/pagemap.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/platform.h b/xen/include/asm-ia64/linux-null/linux/platform.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/platform.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/pm.h b/xen/include/asm-ia64/linux-null/linux/pm.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/pm.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/poll.h b/xen/include/asm-ia64/linux-null/linux/poll.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/proc_fs.h b/xen/include/asm-ia64/linux-null/linux/proc_fs.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/proc_fs.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/profile.h b/xen/include/asm-ia64/linux-null/linux/profile.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/profile.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/ptrace.h b/xen/include/asm-ia64/linux-null/linux/ptrace.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/ptrace.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/random.h b/xen/include/asm-ia64/linux-null/linux/random.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/random.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/rcupdate.h b/xen/include/asm-ia64/linux-null/linux/rcupdate.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/rcupdate.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/rtc.h b/xen/include/asm-ia64/linux-null/linux/rtc.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/rtc.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/rwsem.h b/xen/include/asm-ia64/linux-null/linux/rwsem.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/rwsem.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/seq_file.h b/xen/include/asm-ia64/linux-null/linux/seq_file.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/seq_file.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/serial.h b/xen/include/asm-ia64/linux-null/linux/serial.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/serial.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/serial_core.h b/xen/include/asm-ia64/linux-null/linux/serial_core.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/serial_core.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/signal.h b/xen/include/asm-ia64/linux-null/linux/signal.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/signal.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/slab.h b/xen/include/asm-ia64/linux-null/linux/slab.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/slab.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/smp_lock.h b/xen/include/asm-ia64/linux-null/linux/smp_lock.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/smp_lock.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/swap.h b/xen/include/asm-ia64/linux-null/linux/swap.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/swap.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/sysctl.h b/xen/include/asm-ia64/linux-null/linux/sysctl.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/sysctl.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/threads.h b/xen/include/asm-ia64/linux-null/linux/threads.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/threads.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/tty.h b/xen/include/asm-ia64/linux-null/linux/tty.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/tty.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/vfs.h b/xen/include/asm-ia64/linux-null/linux/vfs.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/vfs.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/vmalloc.h b/xen/include/asm-ia64/linux-null/linux/vmalloc.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/vmalloc.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/workqueue.h b/xen/include/asm-ia64/linux-null/linux/workqueue.h
deleted file mode 100644
index 29f4f4cd69..0000000000
--- a/xen/include/asm-ia64/linux-null/linux/workqueue.h
+++ /dev/null
@@ -1 +0,0 @@
-/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-xen/asm-generic/README.origin b/xen/include/asm-ia64/linux-xen/asm-generic/README.origin
deleted file mode 100644
index 6cd78a86fb..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm-generic/README.origin
+++ /dev/null
@@ -1,8 +0,0 @@
-# Source files in this directory are near-identical copies of linux-2.6.13
-# files:
-
-# NOTE: ALL changes to these files should be clearly marked
-# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
-# easily updated to future versions of the corresponding Linux files.
-
-pgtable-nopud.h -> linux/include/asm-generic/pgtable-nopud.h
diff --git a/xen/include/asm-ia64/linux-xen/asm-generic/pgtable-nopud.h b/xen/include/asm-ia64/linux-xen/asm-generic/pgtable-nopud.h
deleted file mode 100644
index 7e375bbdd0..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm-generic/pgtable-nopud.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef _PGTABLE_NOPUD_H
-#define _PGTABLE_NOPUD_H
-
-#ifndef __ASSEMBLY__
-
-#define __PAGETABLE_PUD_FOLDED
-
-/*
- * Having the pud type consist of a pgd gets the size right, and allows
- * us to conceptually access the pgd entry that this pud is folded into
- * without casting.
- */
-typedef struct { pgd_t pgd; } pud_t;
-
-#define PUD_SHIFT PGDIR_SHIFT
-#define PTRS_PER_PUD 1
-#define PUD_SIZE (1UL << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE-1))
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pud is never bad, and a pud always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline void pgd_clear(pgd_t *pgd) { }
-#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
-
-#define pgd_populate(mm, pgd, pud) do { } while (0)
-/*
- * (puds are folded into pgds so this doesn't get actually called,
- * but the define is needed for a generic inline function.)
- */
-#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
-
-#ifndef XEN
-static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
-{
- return (pud_t *)pgd;
-}
-#else
-static inline volatile pud_t *
-pud_offset(volatile pgd_t * pgd, unsigned long address)
-{
- return (volatile pud_t *)pgd;
-}
-#endif
-
-#define pud_val(x) (pgd_val((x).pgd))
-#define __pud(x) ((pud_t) { __pgd(x) } )
-
-#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
-#define pgd_page_kernel(pgd) (pud_page_kernel((pud_t){ pgd }))
-
-/*
- * allocating and freeing a pud is trivial: the 1-entry pud is
- * inside the pgd, so has no extra memory associated with it.
- */
-#define pud_alloc_one(mm, address) NULL
-#define pud_free(x) do { } while (0)
-#define __pud_free_tlb(tlb, x) do { } while (0)
-
-#undef pud_addr_end
-#define pud_addr_end(addr, end) (end)
-
-#endif /* __ASSEMBLY__ */
-#endif /* _PGTABLE_NOPUD_H */
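The folded-pud scheme deleted above exists so that generic four-level page-table
walk code keeps working unchanged on a three-level architecture. A minimal
sketch of a consumer (pgd_offset/pmd_offset/pte_offset_map are assumed from the
usual Linux paging API, they are not defined in this hunk):

    pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))          /* compile-time 0 in the folded case */
            return NULL;
        pud = pud_offset(pgd, addr); /* with the fold, just a cast of pgd */
        pmd = pmd_offset(pud, addr);
        return pte_offset_map(pmd, addr);
    }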
diff --git a/xen/include/asm-ia64/linux-xen/asm/README.origin b/xen/include/asm-ia64/linux-xen/asm/README.origin
deleted file mode 100644
index 8d86bc9bb2..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/README.origin
+++ /dev/null
@@ -1,51 +0,0 @@
-# Source files in this directory are near-identical copies of linux-2.6.13
-# files:
-
-# NOTE: ALL changes to these files should be clearly marked
-# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
-# easily updated to future versions of the corresponding Linux files.
-
-atomic.h -> linux/include/asm-ia64/atomic.h
-cache.h -> linux/include/asm-ia64/cache.h
-gcc_intrin.h -> linux/include/asm-ia64/gcc_intrin.h
-ia64regs.h -> linux/include/asm-ia64/ia64regs.h
-io.h -> linux/include/asm-ia64/io.h
-irq.h -> linux/include/asm-ia64/irq.h
-hw_irq.h -> linux/include/asm-ia64/hw_irq.h
-kregs.h -> linux/include/asm-ia64/kregs.h
-mca_asm.h -> linux/include/asm-ia64/mca_asm.h
-meminit.h -> linux/include/asm-ia64/meminit.h
-numa.h -> linux/include/asm-ia64/numa.h
-page.h -> linux/include/asm-ia64/page.h
-percpu.h -> linux/include/asm-ia64/percpu.h
-pgalloc.h -> linux/include/asm-ia64/pgalloc.h
-pgtable.h -> linux/include/asm-ia64/pgtable.h
-processor.h -> linux/include/asm-ia64/processor.h
-ptrace.h -> linux/include/asm-ia64/ptrace.h
-sal.h -> linux/include/asm-ia64/sal.h
-sections.h -> linux/include/asm-ia64/sections.h
-smp.h -> linux/include/asm-ia64/smp.h
-spinlock.h -> linux/include/asm-ia64/spinlock.h
-system.h -> linux/include/asm-ia64/system.h
-tlbflush.h -> linux/include/asm-ia64/tlbflush.h
-types.h -> linux/include/asm-ia64/types.h
-
-# The files below are from Linux-2.6.16
-iosapic.h -> linux/include/asm-ia64/iosapic.h
-
-# The files below are from Linux-2.6.16.33
-perfmon.h -> linux/include/asm-ia64/perfmon.h
-perfmon_default_smpl.h -> linux/include/asm-ia64/perfmon_default_smpl.h
-
-# The files below are from Linux-2.6.19
-machvec.h -> linux/include/asm-ia64/machvec.h
-machvec_dig.h -> linux/include/asm-ia64/machvec_dig.h
-machvec_sn2.h -> linux/include/asm-ia64/machvec_sn2.h
-machvec_hpzx1.h -> linux/include/asm-ia64/machvec_hpzx1.h
-machvec_pci.h -> linux/include/asm-ia64/pci.h
-
-# The files below are from Linux-2.6.21
-pal.h -> linux/include/asm-ia64/pal.h
-
-# The files below are from Linux-2.6.26-rc5
-acpi.h -> linux/include/asm-ia64/acpi.h
diff --git a/xen/include/asm-ia64/linux-xen/asm/acpi.h b/xen/include/asm-ia64/linux-xen/asm/acpi.h
deleted file mode 100644
index ab07b9acea..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/acpi.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * asm-ia64/acpi.h
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
- * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#ifndef _ASM_ACPI_H
-#define _ASM_ACPI_H
-
-#ifdef __KERNEL__
-
-#include <acpi/pdc_intel.h>
-
-#include <linux/init.h>
-#include <linux/numa.h>
-#include <asm/system.h>
-#include <asm/numa.h>
-#ifdef XEN
-#include <xen/nodemask.h>
-extern int acpi_dmar_init(void);
-#endif
-
-#define COMPILER_DEPENDENT_INT64 long
-#define COMPILER_DEPENDENT_UINT64 unsigned long
-
-/*
- * Calling conventions:
- *
- * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
- * ACPI_EXTERNAL_XFACE - External ACPI interfaces
- * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
- * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
- */
-#define ACPI_SYSTEM_XFACE
-#define ACPI_EXTERNAL_XFACE
-#define ACPI_INTERNAL_XFACE
-#define ACPI_INTERNAL_VAR_XFACE
-
-/* Asm macros */
-
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS() local_irq_enable()
-#define ACPI_FLUSH_CPU_CACHE()
-
-static inline int
-ia64_acpi_acquire_global_lock (unsigned int *lock)
-{
- unsigned int old, new, val;
- do {
- old = *lock;
- new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
- val = ia64_cmpxchg4_acq(lock, new, old);
- } while (unlikely (val != old));
- return (new < 3) ? -1 : 0;
-}
-
-static inline int
-ia64_acpi_release_global_lock (unsigned int *lock)
-{
- unsigned int old, new, val;
- do {
- old = *lock;
- new = old & ~0x3;
- val = ia64_cmpxchg4_acq(lock, new, old);
- } while (unlikely (val != old));
- return old & 0x1;
-}
-
-#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
- ((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
-
-#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
- ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
-
-#define acpi_disabled 0 /* ACPI always enabled on IA64 */
-#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
-#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
-static inline void disable_acpi(void) { }
-
-const char *acpi_get_sysname (void);
-int acpi_request_vector (u32 int_type);
-int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
-
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-extern void acpi_restore_state_mem(void);
-extern unsigned long acpi_wakeup_address;
-
-/*
- * Record the cpei override flag and current logical cpu. This is
- * useful for CPU removal.
- */
-extern unsigned int can_cpei_retarget(void);
-extern unsigned int is_cpu_cpei_target(unsigned int cpu);
-extern void set_cpei_target_cpu(unsigned int cpu);
-extern unsigned int get_cpei_target_cpu(void);
-extern void prefill_possible_map(void);
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-extern int additional_cpus;
-#else
-#define additional_cpus 0
-#endif
-
-#ifdef CONFIG_ACPI_NUMA
-#if MAX_NUMNODES > 256
-#define MAX_PXM_DOMAINS MAX_NUMNODES
-#else
-#define MAX_PXM_DOMAINS (256)
-#endif
-extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
-extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
-#endif
-
-#define acpi_unlazy_tlb(x)
-
-#ifdef CONFIG_ACPI_NUMA
-extern cpumask_t early_cpu_possible_map;
-#define for_each_possible_early_cpu(cpu) \
- for_each_cpu(cpu, &early_cpu_possible_map)
-
-static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
-{
- int low_cpu, high_cpu;
- int cpu;
- int next_nid = 0;
-
- low_cpu = cpumask_weight(&early_cpu_possible_map);
-
- high_cpu = max(low_cpu, min_cpus);
- high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
-
- for (cpu = low_cpu; cpu < high_cpu; cpu++) {
- cpumask_set_cpu(cpu, &early_cpu_possible_map);
- if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
- node_cpuid[cpu].nid = next_nid;
- next_nid++;
- if (next_nid >= num_online_nodes())
- next_nid = 0;
- }
- }
-}
-#endif /* CONFIG_ACPI_NUMA */
-
-#endif /*__KERNEL__*/
-
-#endif /*_ASM_ACPI_H*/
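For reference, the acquire/release pair deleted above implements the ACPI FACS
global-lock protocol, in which bit 0 of the lock word is "pending" and bit 1 is
"owned". A worked example of the acquire arithmetic (annotation only, assuming
that bit layout):

    /* lock free:  old = 0b00 -> new = ((0 & ~3) + 2) + 0 = 0b10 (owned)
     *             new < 3, so the routine returns -1: lock acquired.
     * lock held:  old = 0b10 -> new = ((2 & ~3) + 2) + 1 = 0b11 (owned|pending)
     *             new >= 3, so it returns 0: the caller must wait for the
     *             firmware's global-lock-release notification and retry. */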
diff --git a/xen/include/asm-ia64/linux-xen/asm/atomic.h b/xen/include/asm-ia64/linux-xen/asm/atomic.h
deleted file mode 100644
index c006ae28ac..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/atomic.h
+++ /dev/null
@@ -1,259 +0,0 @@
-#ifndef _ASM_IA64_ATOMIC_H
-#define _ASM_IA64_ATOMIC_H
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- *
- * NOTE: don't mess with the types below! The "unsigned long" and
- * "int" types were carefully placed so as to ensure proper operation
- * of the macros.
- *
- * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-#include <linux/types.h>
-
-#include <asm/intrinsics.h>
-
-/*
- * On IA-64, counter must always be volatile to ensure that the
- * memory accesses are ordered.
- */
-typedef struct { volatile __s32 counter; } atomic_t;
-typedef struct { volatile __s64 counter; } atomic64_t;
-
-#ifndef XEN
-
-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
-
-#define atomic_read(v) ((v)->counter)
-#define atomic64_read(v) ((v)->counter)
-
-#define atomic_set(v,i) (((v)->counter) = (i))
-#define atomic64_set(v,i) (((v)->counter) = (i))
-
-#else
-
-#define ATOMIC_INIT(i) { (i) }
-#define ATOMIC64_INIT(i) { (i) }
-
-#define build_read_atomic(tag, type) \
-static inline type read_##tag##_atomic(const volatile type *addr) \
-{ \
- type ret; \
- asm volatile("ld%2.acq %0 = %1" \
- : "=r" (ret) \
- : "m" (*addr), "i" (sizeof(type))); \
- return ret; \
-}
-
-#define build_write_atomic(tag, type) \
-static inline void write_##tag##_atomic(volatile type *addr, type val) \
-{ \
- asm volatile("st%2.rel %0 = %1" \
- : "=m" (*addr) \
- : "r" (val), "i" (sizeof(type))); \
-}
-
-build_read_atomic(u8, uint8_t)
-build_read_atomic(u16, uint16_t)
-build_read_atomic(u32, uint32_t)
-build_read_atomic(u64, uint64_t)
-
-build_write_atomic(u8, uint8_t)
-build_write_atomic(u16, uint16_t)
-build_write_atomic(u32, uint32_t)
-build_write_atomic(u64, uint64_t)
-
-#undef build_read_atomic
-#undef build_write_atomic
-
-void __bad_atomic_size(void);
-
-#define read_atomic(p) ({ \
- typeof(*p) __x; \
- switch ( sizeof(*p) ) { \
- case 1: __x = (typeof(*p))read_u8_atomic((uint8_t *)p); break; \
- case 2: __x = (typeof(*p))read_u16_atomic((uint16_t *)p); break; \
- case 4: __x = (typeof(*p))read_u32_atomic((uint32_t *)p); break; \
- case 8: __x = (typeof(*p))read_u64_atomic((uint64_t *)p); break; \
- default: __x = 0; __bad_atomic_size(); break; \
- } \
- __x; \
-})
-
-#define write_atomic(p, x) ({ \
- typeof(*p) __x = (x); \
- switch ( sizeof(*p) ) { \
- case 1: write_u8_atomic((uint8_t *)p, (uint8_t)__x); break; \
- case 2: write_u16_atomic((uint16_t *)p, (uint16_t)__x); break; \
- case 4: write_u32_atomic((uint32_t *)p, (uint32_t)__x); break; \
- case 8: write_u64_atomic((uint64_t *)p, (uint64_t)__x); break; \
- default: __bad_atomic_size(); break; \
- } \
- __x; \
-})
-
-#define _atomic_read(v) ((v).counter)
-#define _atomic64_read(v) ((v).counter)
-#define atomic_read(v) read_atomic(&((v)->counter))
-#define atomic64_read(v) read_atomic(&((v)->counter))
-
-#define _atomic_set(v,i) (((v).counter) = (i))
-#define _atomic64_set(v,i) (((v).counter) = (i))
-#define atomic_set(v,i) write_atomic(&((v)->counter), i)
-#define atomic64_set(v,l) write_atomic(&((v)->counter), l)
-
-#endif
-
-static __inline__ int
-ia64_atomic_add (int i, atomic_t *v)
-{
- __s32 old, new;
- CMPXCHG_BUGCHECK_DECL
-
- do {
- CMPXCHG_BUGCHECK(v);
- old = atomic_read(v);
- new = old + i;
- } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
- return new;
-}
-
-static __inline__ int
-ia64_atomic64_add (__s64 i, atomic64_t *v)
-{
- __s64 old, new;
- CMPXCHG_BUGCHECK_DECL
-
- do {
- CMPXCHG_BUGCHECK(v);
- old = atomic64_read(v);
- new = old + i;
- } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
- return new;
-}
-
-static __inline__ int
-ia64_atomic_sub (int i, atomic_t *v)
-{
- __s32 old, new;
- CMPXCHG_BUGCHECK_DECL
-
- do {
- CMPXCHG_BUGCHECK(v);
- old = atomic_read(v);
- new = old - i;
- } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
- return new;
-}
-
-static __inline__ int
-ia64_atomic64_sub (__s64 i, atomic64_t *v)
-{
- __s64 old, new;
- CMPXCHG_BUGCHECK_DECL
-
- do {
- CMPXCHG_BUGCHECK(v);
- old = atomic64_read(v);
- new = old - i;
- } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
- return new;
-}
-
-#define atomic_add_return(i,v) \
-({ \
- int __ia64_aar_i = (i); \
- (__builtin_constant_p(i) \
- && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
- || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
- || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
- || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
- ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
- : ia64_atomic_add(__ia64_aar_i, v); \
-})
-
-#define atomic64_add_return(i,v) \
-({ \
- long __ia64_aar_i = (i); \
- (__builtin_constant_p(i) \
- && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
- || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
- || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
- || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
- ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
- : ia64_atomic64_add(__ia64_aar_i, v); \
-})
-
-/*
- * Atomically add I to V and return TRUE if the resulting value is
- * negative.
- */
-static __inline__ int
-atomic_add_negative (int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
-static __inline__ int
-atomic64_add_negative (__s64 i, atomic64_t *v)
-{
- return atomic64_add_return(i, v) < 0;
-}
-
-#define atomic_sub_return(i,v) \
-({ \
- int __ia64_asr_i = (i); \
- (__builtin_constant_p(i) \
- && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
- || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
- || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
- || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
- ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
- : ia64_atomic_sub(__ia64_asr_i, v); \
-})
-
-#define atomic64_sub_return(i,v) \
-({ \
- long __ia64_asr_i = (i); \
- (__builtin_constant_p(i) \
- && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
- || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
- || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
- || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
- ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
- : ia64_atomic64_sub(__ia64_asr_i, v); \
-})
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-
-#define atomic_add(i,v) atomic_add_return((i), (v))
-#define atomic_sub(i,v) atomic_sub_return((i), (v))
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-
-#define atomic64_add(i,v) atomic64_add_return((i), (v))
-#define atomic64_sub(i,v) atomic64_sub_return((i), (v))
-#define atomic64_inc(v) atomic64_add(1, (v))
-#define atomic64_dec(v) atomic64_sub(1, (v))
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#endif /* _ASM_IA64_ATOMIC_H */
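The ia64_atomic_add()/sub() helpers deleted above are the classic
compare-and-swap retry loop. The same shape, written against portable C11
atomics instead of the ia64 cmpxchg intrinsics (a sketch, not code from this
tree):

    #include <stdatomic.h>

    static int atomic_add_return_c11(int i, _Atomic int *v)
    {
        int old, new;
        do {
            old = atomic_load(v);   /* snapshot the counter */
            new = old + i;          /* compute the desired value */
            /* publish only if nobody raced us; otherwise retry */
        } while (!atomic_compare_exchange_weak(v, &old, new));
        return new;
    }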
diff --git a/xen/include/asm-ia64/linux-xen/asm/cache.h b/xen/include/asm-ia64/linux-xen/asm/cache.h
deleted file mode 100644
index 542d2e23e4..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/cache.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef _ASM_IA64_CACHE_H
-#define _ASM_IA64_CACHE_H
-
-#include <linux/config.h>
-
-/*
- * Copyright (C) 1998-2000 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-/* Bytes per L1 (data) cache line. */
-#define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
-
-#ifdef XEN
-# define SMP_CACHE_SHIFT L1_CACHE_SHIFT
-# define SMP_CACHE_BYTES L1_CACHE_BYTES
-#else
-#ifdef CONFIG_SMP
-# define SMP_CACHE_SHIFT L1_CACHE_SHIFT
-# define SMP_CACHE_BYTES L1_CACHE_BYTES
-#else
- /*
- * The "aligned" directive can only _increase_ alignment, so this is
- * safe and provides an easy way to avoid wasting space on a
- * uni-processor:
- */
-# define SMP_CACHE_SHIFT 3
-# define SMP_CACHE_BYTES (1 << 3)
-#endif
-#endif
-
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
-#endif /* _ASM_IA64_CACHE_H */
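Typical use of the constants deleted above (illustrative only, not from this
tree): pad hot shared data to a cache line to avoid false sharing, and move
write-once configuration into the read-mostly section.

    struct hot_counter {
        unsigned long value;
    } __attribute__((__aligned__(SMP_CACHE_BYTES)));

    static int tracing_enabled __read_mostly;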
diff --git a/xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h b/xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h
deleted file mode 100644
index 1339f9ebf9..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h
+++ /dev/null
@@ -1,605 +0,0 @@
-#ifndef _ASM_IA64_GCC_INTRIN_H
-#define _ASM_IA64_GCC_INTRIN_H
-/*
- *
- * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
- * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
- */
-
-#include <linux/compiler.h>
-
-/* define this macro to get some asm stmts included in 'c' files */
-#define ASM_SUPPORTED
-
-/* Optimization barrier */
-/* The "volatile" is due to gcc bugs */
-#define ia64_barrier() asm volatile ("":::"memory")
-
-#define ia64_stop() asm volatile (";;"::)
-
-#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
-
-#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
-
-extern void ia64_bad_param_for_setreg (void);
-extern void ia64_bad_param_for_getreg (void);
-
-register unsigned long ia64_r13 asm ("r13") __attribute_used__;
-
-#define ia64_setreg(regnum, val) \
-({ \
- switch (regnum) { \
- case _IA64_REG_PSR_L: \
- asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
- break; \
- case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
- asm volatile ("mov ar%0=%1" :: \
- "i" (regnum - _IA64_REG_AR_KR0), \
- "r"(val): "memory"); \
- break; \
- case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
- asm volatile ("mov cr%0=%1" :: \
- "i" (regnum - _IA64_REG_CR_DCR), \
- "r"(val): "memory" ); \
- break; \
- case _IA64_REG_SP: \
- asm volatile ("mov r12=%0" :: \
- "r"(val): "memory"); \
- break; \
- case _IA64_REG_GP: \
- asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
- break; \
- default: \
- ia64_bad_param_for_setreg(); \
- break; \
- } \
-})
-
-#define ia64_getreg(regnum) \
-({ \
- __u64 ia64_intri_res; \
- \
- switch (regnum) { \
- case _IA64_REG_GP: \
- asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
- break; \
- case _IA64_REG_IP: \
- asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
- break; \
- case _IA64_REG_PSR: \
- asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
- break; \
- case _IA64_REG_TP: /* for current() */ \
- ia64_intri_res = ia64_r13; \
- break; \
- case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
- asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
- : "i"(regnum - _IA64_REG_AR_KR0)); \
- break; \
- case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
- asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
- : "i" (regnum - _IA64_REG_CR_DCR)); \
- break; \
- case _IA64_REG_SP: \
- asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
- break; \
- default: \
- ia64_bad_param_for_getreg(); \
- break; \
- } \
- ia64_intri_res; \
-})
-
-#define ia64_hint_pause 0
-
-#define ia64_hint(mode) \
-({ \
- switch (mode) { \
- case ia64_hint_pause: \
- asm volatile ("hint @pause" ::: "memory"); \
- break; \
- } \
-})
-
-
-/* Integer values for mux1 instruction */
-#define ia64_mux1_brcst 0
-#define ia64_mux1_mix 8
-#define ia64_mux1_shuf 9
-#define ia64_mux1_alt 10
-#define ia64_mux1_rev 11
-
-#define ia64_mux1(x, mode) \
-({ \
- __u64 ia64_intri_res; \
- \
- switch (mode) { \
- case ia64_mux1_brcst: \
- asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
- break; \
- case ia64_mux1_mix: \
- asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
- break; \
- case ia64_mux1_shuf: \
- asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
- break; \
- case ia64_mux1_alt: \
- asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
- break; \
- case ia64_mux1_rev: \
- asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
- break; \
- } \
- ia64_intri_res; \
-})
-
-#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
-# define ia64_popcnt(x) __builtin_popcountl(x)
-#else
-# define ia64_popcnt(x) \
- ({ \
- __u64 ia64_intri_res; \
- asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
- \
- ia64_intri_res; \
- })
-#endif
-
-#define ia64_getf_exp(x) \
-({ \
- long ia64_intri_res; \
- \
- asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
- \
- ia64_intri_res; \
-})
-
-#define ia64_shrp(a, b, count) \
-({ \
- __u64 ia64_intri_res; \
- asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
- ia64_intri_res; \
-})
-
-#define ia64_ldfs(regnum, x) \
-({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
-})
-
-#define ia64_ldfd(regnum, x) \
-({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
-})
-
-#define ia64_ldfe(regnum, x) \
-({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
-})
-
-#define ia64_ldf8(regnum, x) \
-({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
-})
-
-#define ia64_ldf_fill(regnum, x) \
-({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
-})
-
-#define ia64_stfs(x, regnum) \
-({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
-#define ia64_stfd(x, regnum) \
-({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
-#define ia64_stfe(x, regnum) \
-({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
-#define ia64_stf8(x, regnum) \
-({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
-#define ia64_stf_spill(x, regnum) \
-({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
-#define ia64_fetchadd4_acq(p, inc) \
-({ \
- \
- __u64 ia64_intri_res; \
- asm volatile ("fetchadd4.acq %0=[%1],%2" \
- : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
- : "memory"); \
- \
- ia64_intri_res; \
-})
-
-#define ia64_fetchadd4_rel(p, inc) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("fetchadd4.rel %0=[%1],%2" \
- : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
- : "memory"); \
- \
- ia64_intri_res; \
-})
-
-#define ia64_fetchadd8_acq(p, inc) \
-({ \
- \
- __u64 ia64_intri_res; \
- asm volatile ("fetchadd8.acq %0=[%1],%2" \
- : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
- : "memory"); \
- \
- ia64_intri_res; \
-})
-
-#define ia64_fetchadd8_rel(p, inc) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("fetchadd8.rel %0=[%1],%2" \
- : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
- : "memory"); \
- \
- ia64_intri_res; \
-})
-
-#define ia64_xchg1(ptr,x) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("xchg1 %0=[%1],%2" \
- : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_xchg2(ptr,x) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
- : "r" (ptr), "r" (x) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_xchg4(ptr,x) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
- : "r" (ptr), "r" (x) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_xchg8(ptr,x) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
- : "r" (ptr), "r" (x) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_cmpxchg1_acq(ptr, new, old) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
- "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_cmpxchg1_rel(ptr, new, old) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
- "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_cmpxchg2_acq(ptr, new, old) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
- "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_cmpxchg2_rel(ptr, new, old) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- \
- asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
- "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_cmpxchg4_acq(ptr, new, old) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
- "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_cmpxchg4_rel(ptr, new, old) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
- "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_cmpxchg8_acq(ptr, new, old) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
- "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_cmpxchg8_rel(ptr, new, old) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- \
- asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
- "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
- ia64_intri_res; \
-})
-
-#define ia64_mf() asm volatile ("mf" ::: "memory")
-#define ia64_mfa() asm volatile ("mf.a" ::: "memory")
-
-#define ia64_invala() asm volatile ("invala" ::: "memory")
-
-#define ia64_thash(addr) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
- ia64_intri_res; \
-})
-
-#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
-#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");
-
-#ifdef HAVE_SERIALIZE_DIRECTIVE
-# define ia64_dv_serialize_data() asm volatile (".serialize.data");
-# define ia64_dv_serialize_instruction() asm volatile (".serialize.instruction");
-#else
-# define ia64_dv_serialize_data()
-# define ia64_dv_serialize_instruction()
-#endif
-
-#define ia64_nop(x) asm volatile ("nop %0"::"i"(x));
-
-#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
-
-#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
-
-
-#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
- :: "r"(trnum), "r"(addr) : "memory")
-
-#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
- :: "r"(trnum), "r"(addr) : "memory")
-
-#define ia64_tpa(addr) \
-({ \
- __u64 ia64_pa; \
- asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
- ia64_pa; \
-})
-
-#define __ia64_set_dbr(index, val) \
- asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
-#define ia64_set_ibr(index, val) \
- asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
-#define ia64_set_pkr(index, val) \
- asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
-#define ia64_set_pmc(index, val) \
- asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
-#define ia64_set_pmd(index, val) \
- asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
-#define ia64_set_rr(index, val) \
- asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
-
-#define ia64_get_cpuid(index) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
- ia64_intri_res; \
-})
-
-#define __ia64_get_dbr(index) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
- ia64_intri_res; \
-})
-
-#define ia64_get_ibr(index) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
- ia64_intri_res; \
-})
-
-#define ia64_get_pkr(index) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
- ia64_intri_res; \
-})
-
-#define ia64_get_pmc(index) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
- ia64_intri_res; \
-})
-
-
-#define ia64_get_pmd(index) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
- ia64_intri_res; \
-})
-
-#define ia64_get_rr(index) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
- ia64_intri_res; \
-})
-
-#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
-
-
-#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
-
-#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
-#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
-#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
-#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
-
-#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
-
-#define ia64_ptcga(addr, size) \
-do { \
- asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
- ia64_dv_serialize_data(); \
-} while (0)
-
-#define ia64_ptcl(addr, size) \
-do { \
- asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
- ia64_dv_serialize_data(); \
-} while (0)
-
-#define ia64_ptri(addr, size) \
- asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
-
-#define ia64_ptrd(addr, size) \
- asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
-
-/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
-
-#define ia64_lfhint_none 0
-#define ia64_lfhint_nt1 1
-#define ia64_lfhint_nt2 2
-#define ia64_lfhint_nta 3
-
-#define ia64_lfetch(lfhint, y) \
-({ \
- switch (lfhint) { \
- case ia64_lfhint_none: \
- asm volatile ("lfetch [%0]" : : "r"(y)); \
- break; \
- case ia64_lfhint_nt1: \
- asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
- break; \
- case ia64_lfhint_nt2: \
- asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
- break; \
- case ia64_lfhint_nta: \
- asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
- break; \
- } \
-})
-
-#define ia64_lfetch_excl(lfhint, y) \
-({ \
- switch (lfhint) { \
- case ia64_lfhint_none: \
- asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
- break; \
- case ia64_lfhint_nt1: \
- asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
- break; \
- case ia64_lfhint_nt2: \
- asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
- break; \
- case ia64_lfhint_nta: \
- asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
- break; \
- } \
-})
-
-#define ia64_lfetch_fault(lfhint, y) \
-({ \
- switch (lfhint) { \
- case ia64_lfhint_none: \
- asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
- break; \
- case ia64_lfhint_nt1: \
- asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
- break; \
- case ia64_lfhint_nt2: \
- asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
- break; \
- case ia64_lfhint_nta: \
- asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
- break; \
- } \
-})
-
-#define ia64_lfetch_fault_excl(lfhint, y) \
-({ \
- switch (lfhint) { \
- case ia64_lfhint_none: \
- asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
- break; \
- case ia64_lfhint_nt1: \
- asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
- break; \
- case ia64_lfhint_nt2: \
- asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
- break; \
- case ia64_lfhint_nta: \
- asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
- break; \
- } \
-})
-
-#define ia64_intrin_local_irq_restore(x) \
-do { \
- asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
- "(p6) ssm psr.i;" \
- "(p7) rsm psr.i;;" \
- "(p6) srlz.d" \
- :: "r"((x)) : "p6", "p7", "memory"); \
-} while (0)
-
-#ifdef XEN
-#include <asm/xengcc_intrin.h>
-#endif
-
-#endif /* _ASM_IA64_GCC_INTRIN_H */
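Most consumers of the cmpxchg intrinsics deleted above follow the same pattern:
ar.ccv is loaded with the expected value, then cmpxchg*.acq/.rel performs the
conditional exchange. A sketch of a test-and-set style trylock built on
cmpxchg4.acq (hypothetical lock variable, not code from this tree):

    static inline int trylock(volatile unsigned int *lock)
    {
        /* Succeeds only if we observed 0 and installed 1, with acquire
         * ordering so the critical section cannot float above the lock. */
        return ia64_cmpxchg4_acq(lock, 1, 0) == 0;
    }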
diff --git a/xen/include/asm-ia64/linux-xen/asm/hw_irq.h b/xen/include/asm-ia64/linux-xen/asm/hw_irq.h
deleted file mode 100644
index 4de23815d0..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/hw_irq.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#ifndef _ASM_IA64_HW_IRQ_H
-#define _ASM_IA64_HW_IRQ_H
-
-/*
- * Copyright (C) 2001-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/profile.h>
-
-#include <asm/machvec.h>
-#include <asm/ptrace.h>
-#include <asm/smp.h>
-
-typedef u8 ia64_vector;
-
-/*
- * 0 special
- *
- * 1,3-14 are reserved from firmware
- *
- * 16-255 (vectored external interrupts) are available
- *
- * 15 spurious interrupt (see IVR)
- *
- * 16 lowest priority, 255 highest priority
- *
- * 15 classes of 16 interrupts each.
- */
-#define IA64_MIN_VECTORED_IRQ 16
-#define IA64_MAX_VECTORED_IRQ 255
-#define IA64_NUM_VECTORS 256
-
-#define AUTO_ASSIGN_IRQ (-1)
-
-#define IA64_SPURIOUS_INT_VECTOR 0x0f
-
-/*
- * Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI.
- */
-#define IA64_CPEP_VECTOR 0x1c /* corrected platform error polling vector */
-#define IA64_CMCP_VECTOR 0x1d /* corrected machine-check polling vector */
-#define IA64_CPE_VECTOR 0x1e /* corrected platform error interrupt vector */
-#define IA64_CMC_VECTOR 0x1f /* corrected machine-check interrupt vector */
-/*
- * Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
- */
-#define IA64_FIRST_DEVICE_VECTOR 0x30
-#define IA64_LAST_DEVICE_VECTOR 0xe7
-#define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
-
-#define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */
-#define IA64_PERFMON_VECTOR 0xee /* performance monitor interrupt vector */
-#define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */
-#define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
-#define IA64_IPI_RESCHEDULE 0xfd /* SMP reschedule */
-#define IA64_IPI_VECTOR 0xfe /* inter-processor interrupt vector */
-
-/* Used for encoding redirected irqs */
-
-#define IA64_IRQ_REDIRECTED (1 << 31)
-
-/* IA64 inter-cpu interrupt related definitions */
-
-#define IA64_IPI_DEFAULT_BASE_ADDR 0xfee00000
-
-/* Delivery modes for inter-cpu interrupts */
-enum {
- IA64_IPI_DM_INT = 0x0, /* pend an external interrupt */
- IA64_IPI_DM_PMI = 0x2, /* pend a PMI */
- IA64_IPI_DM_NMI = 0x4, /* pend an NMI (vector 2) */
- IA64_IPI_DM_INIT = 0x5, /* pend an INIT interrupt */
- IA64_IPI_DM_EXTINT = 0x7, /* pend an 8259-compatible interrupt. */
-};
-
-extern __u8 isa_irq_to_vector_map[16];
-#define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)]
-
-extern int assign_irq_vector (int irq); /* allocate a free vector */
-extern void free_irq_vector (int vector);
-extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
-extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
-#ifdef XEN
-extern int xen_do_IRQ(ia64_vector vector);
-extern int setup_vector(unsigned int vec, struct irqaction *action);
-#endif
-
-static inline void
-hw_resend_irq (hw_irq_controller *h, unsigned int vector)
-{
- platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
-}
-
-/*
- * Default implementations for the irq-descriptor API:
- */
-
-extern irq_desc_t irq_desc[NR_IRQS];
-
-#ifndef CONFIG_IA64_GENERIC
-static inline unsigned int
-__ia64_local_vector_to_irq (ia64_vector vec)
-{
- return (unsigned int) vec;
-}
-#endif
-
-/*
- * Next follows the irq descriptor interface. On IA-64, each CPU supports 256 interrupt
- * vectors. On smaller systems, there is a one-to-one correspondence between interrupt
- * vectors and the Linux irq numbers. However, larger systems may have multiple interrupt
- * domains meaning that the translation from vector number to irq number depends on the
- * interrupt domain that a CPU belongs to. This API abstracts such platform-dependent
- * differences and provides a uniform means to translate between vector and irq numbers
- * and to obtain the irq descriptor for a given irq number.
- */
-
-/* Return a pointer to the irq descriptor for IRQ. */
-static inline irq_desc_t *
-irq_descp (int irq)
-{
- return irq_desc + irq;
-}
-
-/*
- * Convert the local IA-64 vector to the corresponding irq number. This translation is
- * done in the context of the interrupt domain that the currently executing CPU belongs
- * to.
- */
-static inline unsigned int
-local_vector_to_irq (ia64_vector vec)
-{
- return platform_local_vector_to_irq(vec);
-}
-
-#endif /* _ASM_IA64_HW_IRQ_H */
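The vector-to-irq translation layer deleted above is consumed by the
external-interrupt path roughly as follows (a sketch: the IVR read uses
ia64_getreg() from gcc_intrin.h in this series, and the do_IRQ() dispatcher
and its signature are assumed):

    void handle_external_interrupt(void)
    {
        ia64_vector vec = (ia64_vector) ia64_getreg(_IA64_REG_CR_IVR);

        if (vec != IA64_SPURIOUS_INT_VECTOR) {
            /* map the hardware vector into this CPU's irq domain */
            do_IRQ(local_vector_to_irq(vec));
        }
    }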
diff --git a/xen/include/asm-ia64/linux-xen/asm/ia64regs.h b/xen/include/asm-ia64/linux-xen/asm/ia64regs.h
deleted file mode 100644
index a90db1022a..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/ia64regs.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2002,2003 Intel Corp.
- * Jun Nakajima <jun.nakajima@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- */
-
-#ifndef _ASM_IA64_IA64REGS_H
-#define _ASM_IA64_IA64REGS_H
-
-/*
- * Register Names for getreg() and setreg().
- *
- * The "magic" numbers happen to match the values used by the Intel compiler's
- * getreg()/setreg() intrinsics.
- */
-
-/* Special Registers */
-
-#define _IA64_REG_IP 1016 /* getreg only */
-#define _IA64_REG_PSR 1019
-#define _IA64_REG_PSR_L 1019
-
-/* General Integer Registers */
-
-#define _IA64_REG_GP 1025 /* R1 */
-#define _IA64_REG_R8 1032 /* R8 */
-#define _IA64_REG_R9 1033 /* R9 */
-#define _IA64_REG_SP 1036 /* R12 */
-#define _IA64_REG_TP 1037 /* R13 */
-
-/* Application Registers */
-
-#define _IA64_REG_AR_KR0 3072
-#define _IA64_REG_AR_KR1 3073
-#define _IA64_REG_AR_KR2 3074
-#define _IA64_REG_AR_KR3 3075
-#define _IA64_REG_AR_KR4 3076
-#define _IA64_REG_AR_KR5 3077
-#define _IA64_REG_AR_KR6 3078
-#define _IA64_REG_AR_KR7 3079
-#define _IA64_REG_AR_RSC 3088
-#define _IA64_REG_AR_BSP 3089
-#define _IA64_REG_AR_BSPSTORE 3090
-#define _IA64_REG_AR_RNAT 3091
-#define _IA64_REG_AR_FCR 3093
-#define _IA64_REG_AR_EFLAG 3096
-#define _IA64_REG_AR_CSD 3097
-#define _IA64_REG_AR_SSD 3098
-#define _IA64_REG_AR_CFLAG 3099
-#define _IA64_REG_AR_FSR 3100
-#define _IA64_REG_AR_FIR 3101
-#define _IA64_REG_AR_FDR 3102
-#define _IA64_REG_AR_CCV 3104
-#define _IA64_REG_AR_UNAT 3108
-#define _IA64_REG_AR_FPSR 3112
-#define _IA64_REG_AR_ITC 3116
-#define _IA64_REG_AR_PFS 3136
-#define _IA64_REG_AR_LC 3137
-#define _IA64_REG_AR_EC 3138
-
-/* Control Registers */
-
-#define _IA64_REG_CR_DCR 4096
-#define _IA64_REG_CR_ITM 4097
-#define _IA64_REG_CR_IVA 4098
-#define _IA64_REG_CR_PTA 4104
-#define _IA64_REG_CR_IPSR 4112
-#define _IA64_REG_CR_ISR 4113
-#define _IA64_REG_CR_IIP 4115
-#define _IA64_REG_CR_IFA 4116
-#define _IA64_REG_CR_ITIR 4117
-#define _IA64_REG_CR_IIPA 4118
-#define _IA64_REG_CR_IFS 4119
-#define _IA64_REG_CR_IIM 4120
-#define _IA64_REG_CR_IHA 4121
-#define _IA64_REG_CR_LID 4160
-#define _IA64_REG_CR_IVR 4161 /* getreg only */
-#define _IA64_REG_CR_TPR 4162
-#define _IA64_REG_CR_EOI 4163
-#define _IA64_REG_CR_IRR0 4164 /* getreg only */
-#define _IA64_REG_CR_IRR1 4165 /* getreg only */
-#define _IA64_REG_CR_IRR2 4166 /* getreg only */
-#define _IA64_REG_CR_IRR3 4167 /* getreg only */
-#define _IA64_REG_CR_ITV 4168
-#define _IA64_REG_CR_PMV 4169
-#define _IA64_REG_CR_CMCV 4170
-#define _IA64_REG_CR_LRR0 4176
-#define _IA64_REG_CR_LRR1 4177
-
-/* Indirect Registers for getindreg() and setindreg() */
-
-#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
-#define _IA64_REG_INDR_DBR 9001
-#define _IA64_REG_INDR_IBR 9002
-#define _IA64_REG_INDR_PKR 9003
-#define _IA64_REG_INDR_PMC 9004
-#define _IA64_REG_INDR_PMD 9005
-#define _IA64_REG_INDR_RR 9006
-
-#ifdef XEN
-#include <asm/xenia64regs.h>
-#endif
-
-#endif /* _ASM_IA64_IA64REGS_H */
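The register numbers deleted above pair with the ia64_getreg()/ia64_setreg()
macros from gcc_intrin.h. For example, r13 (TP) holds the current task/vcpu
pointer, so a raw accessor looks like this (sketch only):

    static inline void *current_raw(void)
    {
        return (void *) ia64_getreg(_IA64_REG_TP);
    }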
diff --git a/xen/include/asm-ia64/linux-xen/asm/io.h b/xen/include/asm-ia64/linux-xen/asm/io.h
deleted file mode 100644
index 2b7b05ae01..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/io.h
+++ /dev/null
@@ -1,490 +0,0 @@
-#ifndef _ASM_IA64_IO_H
-#define _ASM_IA64_IO_H
-
-/*
- * This file contains the definitions for the emulated IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated to
- * (a) handle it all in a way that makes gcc able to optimize it as
- * well as possible and (b) avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- */
-
-/* We don't use IO slowdowns on the ia64, but.. */
-#define __SLOW_DOWN_IO do { } while (0)
-#define SLOW_DOWN_IO do { } while (0)
-
-#ifdef XEN
-#include <asm/xensystem.h>
-#else
-#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */
-#endif
-
-/*
- * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
- * large machines may have multiple other I/O spaces so we can't place any a priori limit
- * on IO_SPACE_LIMIT. These additional spaces are described in ACPI.
- */
-#define IO_SPACE_LIMIT 0xffffffffffffffffUL
-
-#define MAX_IO_SPACES_BITS 4
-#define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS)
-#define IO_SPACE_BITS 24
-#define IO_SPACE_SIZE (1UL << IO_SPACE_BITS)
-
-#define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS)
-#define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS)
-#define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1))
-
-#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff))
-
-#ifdef XEN
-/* Offset to IO port; do not catch error. */
-#define IO_SPACE_SPARSE_DECODING(off) ((((off) >> 12) << 2) | ((off) & 0x3))
-#define IO_SPACE_SPARSE_PORTS_PER_PAGE (0x4 << (PAGE_SHIFT - 12))
-#endif
-
-struct io_space {
- unsigned long mmio_base; /* base in MMIO space */
- int sparse;
-};
-
-extern struct io_space io_space[];
-extern unsigned int num_io_spaces;
-
-# ifdef __KERNEL__
-
-/*
- * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
- * 0xCxxxxxxxxxxxxxxx MMIO cookie (return from ioremap)
- * 0x000000001SPPPPPP PIO cookie (S=space number, P..P=port)
- *
- * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
- * code that uses bare port numbers without the prerequisite pci_iomap().
- */
-#define PIO_OFFSET (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
-#define PIO_MASK (PIO_OFFSET - 1)
-#define PIO_RESERVED __IA64_UNCACHED_OFFSET
-#define HAVE_ARCH_PIO_SIZE
-
-#include <asm/intrinsics.h>
-#include <asm/machvec.h>
-#include <asm/page.h>
-#include <asm/system.h>
-#include <asm-generic/iomap.h>
-
-
-#ifndef XEN
-/*
- * Change virtual addresses to physical addresses and vv.
- */
-static inline unsigned long
-virt_to_maddr (volatile void *address)
-{
- return (unsigned long) address - PAGE_OFFSET;
-}
-#endif
-
-static inline void*
-maddr_to_virt (unsigned long address)
-{
- return (void *) (address + PAGE_OFFSET);
-}
-
-
-#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
-
-/*
- * The following two macros are deprecated and scheduled for removal.
- * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
- */
-#define bus_to_virt maddr_to_virt
-#define virt_to_bus virt_to_maddr
-#define page_to_bus page_to_maddr
-
-# endif /* KERNEL */
-
-/*
- * Memory fence w/accept. This should never be used in code that is
- * not IA-64 specific.
- */
-#define __ia64_mf_a() ia64_mfa()
-
-/**
- * ___ia64_mmiowb - I/O write barrier
- *
- * Ensure ordering of I/O space writes. This will make sure that writes
- * following the barrier will arrive after all previous writes. For most
- * ia64 platforms, this is a simple 'mf.a' instruction.
- *
- * See Documentation/DocBook/deviceiobook.tmpl for more information.
- */
-static inline void ___ia64_mmiowb(void)
-{
- ia64_mfa();
-}
-
-static inline void*
-__ia64_mk_io_addr (unsigned long port)
-{
- struct io_space *space;
- unsigned long offset;
-
- space = &io_space[IO_SPACE_NR(port)];
- port = IO_SPACE_PORT(port);
- if (space->sparse)
- offset = IO_SPACE_SPARSE_ENCODING(port);
- else
- offset = port;
-
- return (void *) (space->mmio_base | offset);
-}
-
-#define __ia64_inb ___ia64_inb
-#define __ia64_inw ___ia64_inw
-#define __ia64_inl ___ia64_inl
-#define __ia64_outb ___ia64_outb
-#define __ia64_outw ___ia64_outw
-#define __ia64_outl ___ia64_outl
-#define __ia64_readb ___ia64_readb
-#define __ia64_readw ___ia64_readw
-#define __ia64_readl ___ia64_readl
-#define __ia64_readq ___ia64_readq
-#define __ia64_readb_relaxed ___ia64_readb
-#define __ia64_readw_relaxed ___ia64_readw
-#define __ia64_readl_relaxed ___ia64_readl
-#define __ia64_readq_relaxed ___ia64_readq
-#define __ia64_writeb ___ia64_writeb
-#define __ia64_writew ___ia64_writew
-#define __ia64_writel ___ia64_writel
-#define __ia64_writeq ___ia64_writeq
-#define __ia64_mmiowb ___ia64_mmiowb
-
-/*
- * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
- * that the access has completed before executing other I/O accesses. Since we're doing
- * the accesses through an uncachable (UC) translation, the CPU will execute them in
- * program order. However, we still need to tell the compiler not to shuffle them around
- * during optimization, which is why we use "volatile" pointers.
- */
-
-static inline unsigned int
-___ia64_inb (unsigned long port)
-{
- volatile unsigned char *addr = __ia64_mk_io_addr(port);
- unsigned char ret;
-
- ret = *addr;
- __ia64_mf_a();
- return ret;
-}
-
-static inline unsigned int
-___ia64_inw (unsigned long port)
-{
- volatile unsigned short *addr = __ia64_mk_io_addr(port);
- unsigned short ret;
-
- ret = *addr;
- __ia64_mf_a();
- return ret;
-}
-
-static inline unsigned int
-___ia64_inl (unsigned long port)
-{
- volatile unsigned int *addr = __ia64_mk_io_addr(port);
- unsigned int ret;
-
- ret = *addr;
- __ia64_mf_a();
- return ret;
-}
-
-static inline void
-___ia64_outb (unsigned char val, unsigned long port)
-{
- volatile unsigned char *addr = __ia64_mk_io_addr(port);
-
- *addr = val;
- __ia64_mf_a();
-}
-
-static inline void
-___ia64_outw (unsigned short val, unsigned long port)
-{
- volatile unsigned short *addr = __ia64_mk_io_addr(port);
-
- *addr = val;
- __ia64_mf_a();
-}
-
-static inline void
-___ia64_outl (unsigned int val, unsigned long port)
-{
- volatile unsigned int *addr = __ia64_mk_io_addr(port);
-
- *addr = val;
- __ia64_mf_a();
-}
-
-static inline void
-__insb (unsigned long port, void *dst, unsigned long count)
-{
- unsigned char *dp = dst;
-
- while (count--)
- *dp++ = platform_inb(port);
-}
-
-static inline void
-__insw (unsigned long port, void *dst, unsigned long count)
-{
- unsigned short *dp = dst;
-
- while (count--)
- *dp++ = platform_inw(port);
-}
-
-static inline void
-__insl (unsigned long port, void *dst, unsigned long count)
-{
- unsigned int *dp = dst;
-
- while (count--)
- *dp++ = platform_inl(port);
-}
-
-static inline void
-__outsb (unsigned long port, const void *src, unsigned long count)
-{
- const unsigned char *sp = src;
-
- while (count--)
- platform_outb(*sp++, port);
-}
-
-static inline void
-__outsw (unsigned long port, const void *src, unsigned long count)
-{
- const unsigned short *sp = src;
-
- while (count--)
- platform_outw(*sp++, port);
-}
-
-static inline void
-__outsl (unsigned long port, const void *src, unsigned long count)
-{
- const unsigned int *sp = src;
-
- while (count--)
- platform_outl(*sp++, port);
-}
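-
-/*
- * Illustrative usage sketch, not part of the original header
- * (hypothetical legacy ATA data port): the string forms above simply
- * loop the per-access machine-vector hooks, e.g. pulling one 512-byte
- * sector:
- *
- *	u16 buf[256];
- *	insw(0x1f0, buf, 256);		256 x 16-bit reads from port 0x1f0
- */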
-
-/*
- * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
- * specification regarding legacy I/O support. Thus, we have to make these operations
- * platform dependent...
- */
-#define __inb platform_inb
-#define __inw platform_inw
-#define __inl platform_inl
-#define __outb platform_outb
-#define __outw platform_outw
-#define __outl platform_outl
-#define __mmiowb platform_mmiowb
-
-#define inb(p) __inb(p)
-#define inw(p) __inw(p)
-#define inl(p) __inl(p)
-#define insb(p,d,c) __insb(p,d,c)
-#define insw(p,d,c) __insw(p,d,c)
-#define insl(p,d,c) __insl(p,d,c)
-#define outb(v,p) __outb(v,p)
-#define outw(v,p) __outw(v,p)
-#define outl(v,p) __outl(v,p)
-#define outsb(p,s,c) __outsb(p,s,c)
-#define outsw(p,s,c) __outsw(p,s,c)
-#define outsl(p,s,c) __outsl(p,s,c)
-#define mmiowb() __mmiowb()
-
-/*
- * The addresses passed to these functions are ioremap()ped already.
- *
- * We need these to be machine vectors since some platforms don't provide
- * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
- * a good idea). Writes are ok though for all existing ia64 platforms (and
- * hopefully it'll stay that way).
- */
-static inline unsigned char
-___ia64_readb (const volatile void __iomem *addr)
-{
- return *(volatile unsigned char __force *)addr;
-}
-
-static inline unsigned short
-___ia64_readw (const volatile void __iomem *addr)
-{
- return *(volatile unsigned short __force *)addr;
-}
-
-static inline unsigned int
-___ia64_readl (const volatile void __iomem *addr)
-{
- return *(volatile unsigned int __force *) addr;
-}
-
-static inline unsigned long
-___ia64_readq (const volatile void __iomem *addr)
-{
- return *(volatile unsigned long __force *) addr;
-}
-
-static inline void
-__writeb (unsigned char val, volatile void __iomem *addr)
-{
- *(volatile unsigned char __force *) addr = val;
-}
-
-static inline void
-__writew (unsigned short val, volatile void __iomem *addr)
-{
- *(volatile unsigned short __force *) addr = val;
-}
-
-static inline void
-__writel (unsigned int val, volatile void __iomem *addr)
-{
- *(volatile unsigned int __force *) addr = val;
-}
-
-static inline void
-__writeq (unsigned long val, volatile void __iomem *addr)
-{
- *(volatile unsigned long __force *) addr = val;
-}
-
-#define __readb platform_readb
-#define __readw platform_readw
-#define __readl platform_readl
-#define __readq platform_readq
-#define __readb_relaxed platform_readb_relaxed
-#define __readw_relaxed platform_readw_relaxed
-#define __readl_relaxed platform_readl_relaxed
-#define __readq_relaxed platform_readq_relaxed
-
-#define readb(a) __readb((a))
-#define readw(a) __readw((a))
-#define readl(a) __readl((a))
-#define readq(a) __readq((a))
-#define readb_relaxed(a) __readb_relaxed((a))
-#define readw_relaxed(a) __readw_relaxed((a))
-#define readl_relaxed(a) __readl_relaxed((a))
-#define readq_relaxed(a) __readq_relaxed((a))
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_readq readq
-#define __raw_readb_relaxed readb_relaxed
-#define __raw_readw_relaxed readw_relaxed
-#define __raw_readl_relaxed readl_relaxed
-#define __raw_readq_relaxed readq_relaxed
-#define writeb(v,a) __writeb((v), (a))
-#define writew(v,a) __writew((v), (a))
-#define writel(v,a) __writel((v), (a))
-#define writeq(v,a) __writeq((v), (a))
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-#define __raw_writeq writeq
-
-#ifndef inb_p
-# define inb_p inb
-#endif
-#ifndef inw_p
-# define inw_p inw
-#endif
-#ifndef inl_p
-# define inl_p inl
-#endif
-
-#ifndef outb_p
-# define outb_p outb
-#endif
-#ifndef outw_p
-# define outw_p outw
-#endif
-#ifndef outl_p
-# define outl_p outl
-#endif
-
-/*
- * An "address" in IO memory space is not clearly either an integer or a pointer. We will
- * accept both, thus the casts.
- *
- * On ia-64, we access the physical I/O memory space through the uncached kernel region.
- */
-static inline void __iomem *
-ioremap (unsigned long offset, unsigned long size)
-{
- return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
-}
-
-static inline void
-iounmap (volatile void __iomem *addr)
-{
-}
-
-#define ioremap_nocache(o,s) ioremap(o,s)
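-
-/*
- * Illustrative sketch, not part of the original header (hypothetical
- * BAR address and register offset): because ioremap() just ORs in
- * __IA64_UNCACHED_OFFSET, there is nothing to tear down, which is why
- * iounmap() is empty.
- */
-static inline unsigned int example_read_device_id(void)
-{
-	void __iomem *regs = ioremap(0x80000000UL, 0x1000);	/* hypothetical BAR */
-	unsigned int id = readl(regs);				/* hypothetical ID register */
-
-	iounmap(regs);		/* no-op on ia64, kept for portability */
-	return id;
-}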
-
-# ifdef __KERNEL__
-
-/*
- * String version of IO memory access ops:
- */
-extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
-extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
-extern void memset_io(volatile void __iomem *s, int c, long n);
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
-# endif /* __KERNEL__ */
-
-/*
- * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that
- * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
- * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
- * SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing
- * over BIO-level virtual merging.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#if 1
-#define BIO_VMERGE_BOUNDARY 0
-#else
-/*
- * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be
- * replaced by dma_merge_mask() or something of that sort. Note: the only way
- * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets
- * expanded into:
- *
- * addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_merge_mask)
- *
- * which is precisely what we want.
- */
-#define BIO_VMERGE_BOUNDARY (ia64_max_iommu_merge_mask + 1)
-#endif
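-
-/*
- * Illustrative note, not part of the original header: for a
- * power-of-two-minus-one mask m, ((m + 1) - 1) == m, so with the
- * alternative definition above
- *
- *	addr & (BIO_VMERGE_BOUNDARY - 1) == addr & ia64_max_iommu_merge_mask
- *
- * e.g. a mask of 0xfff gives addr & 0xfff on both sides.
- */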
-
-#endif /* _ASM_IA64_IO_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/iosapic.h b/xen/include/asm-ia64/linux-xen/asm/iosapic.h
deleted file mode 100644
index 899c947d21..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/iosapic.h
+++ /dev/null
@@ -1,198 +0,0 @@
-#ifndef __ASM_IA64_IOSAPIC_H
-#define __ASM_IA64_IOSAPIC_H
-
-#define IOSAPIC_REG_SELECT 0x0
-#define IOSAPIC_WINDOW 0x10
-#define IOSAPIC_EOI 0x40
-
-#define IOSAPIC_VERSION 0x1
-
-/*
- * Redirection table entry
- */
-#define IOSAPIC_RTE_LOW(i)	(0x10+(i)*2)
-#define IOSAPIC_RTE_HIGH(i)	(0x11+(i)*2)
-
-#define IOSAPIC_DEST_SHIFT 16
-
-/*
- * Delivery mode
- */
-#define IOSAPIC_DELIVERY_SHIFT 8
-#define IOSAPIC_FIXED 0x0
-#define IOSAPIC_LOWEST_PRIORITY 0x1
-#define IOSAPIC_PMI 0x2
-#define IOSAPIC_NMI 0x4
-#define IOSAPIC_INIT 0x5
-#define IOSAPIC_EXTINT 0x7
-
-/*
- * Interrupt polarity
- */
-#define IOSAPIC_POLARITY_SHIFT 13
-#define IOSAPIC_POL_HIGH 0
-#define IOSAPIC_POL_LOW 1
-
-/*
- * Trigger mode
- */
-#define IOSAPIC_TRIGGER_SHIFT 15
-#define IOSAPIC_EDGE 0
-#define IOSAPIC_LEVEL 1
-
-/*
- * Mask bit
- */
-
-#define IOSAPIC_MASK_SHIFT 16
-#define IOSAPIC_MASK (1<<IOSAPIC_MASK_SHIFT)
-
-#ifndef __ASSEMBLY__
-
-#ifdef CONFIG_IOSAPIC
-
-#define NR_IOSAPICS 256
-
-#ifdef XEN
-struct iosapic {
- char __iomem *addr; /* base address of IOSAPIC */
- unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */
- unsigned short num_rte; /* number of RTE in this IOSAPIC */
- int rtes_inuse; /* # of RTEs in use on this IOSAPIC */
- unsigned int id; /* APIC ID */
-#ifdef CONFIG_NUMA
- unsigned short node; /* numa node association via pxm */
-#endif
-};
-
-extern struct iosapic iosapic_lists[NR_IOSAPICS];
-
-static inline int find_iosapic_by_addr(unsigned long addr)
-{
- int i;
-
- for (i = 0; i < NR_IOSAPICS; i++) {
- if ((unsigned long)iosapic_lists[i].addr == addr)
- return i;
- }
-
- return -1;
-}
-#endif
-
-
-static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
-{
-#ifdef XEN
-	if (iommu_enabled && (reg >= 0x10)) {	/* only RTE registers are remapped */
- int apic = find_iosapic_by_addr((unsigned long)iosapic);
- return io_apic_read_remap_rte(apic, reg);
- }
-#endif
- writel(reg, iosapic + IOSAPIC_REG_SELECT);
- return readl(iosapic + IOSAPIC_WINDOW);
-}
-
-static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
-{
-#ifdef XEN
-	if (iommu_enabled && (reg >= 0x10)) {	/* only RTE registers are remapped */
- int apic = find_iosapic_by_addr((unsigned long)iosapic);
- iommu_update_ire_from_apic(apic, reg, val);
- return;
- }
-#endif
- writel(reg, iosapic + IOSAPIC_REG_SELECT);
- writel(val, iosapic + IOSAPIC_WINDOW);
-}
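-
-/*
- * Illustrative helper, not part of the original header: reading a full
- * 64-bit RTE takes two selector/window transactions through the
- * accessors above.
- */
-static inline u64 iosapic_read_rte_pair(char __iomem *iosapic, int rte)
-{
-	u64 lo = iosapic_read(iosapic, IOSAPIC_RTE_LOW(rte));
-	u64 hi = iosapic_read(iosapic, IOSAPIC_RTE_HIGH(rte));
-
-	return (hi << 32) | lo;
-}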
-
-static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
-{
- writel(vector, iosapic + IOSAPIC_EOI);
-}
-
-extern void __init iosapic_system_init (int pcat_compat);
-#ifndef XEN
-extern int __devinit iosapic_init (unsigned long address,
- unsigned int gsi_base);
-#else
-extern int __devinit iosapic_init (unsigned long address,
- unsigned int gsi_base, unsigned int id);
-#endif
-#ifdef CONFIG_HOTPLUG
-extern int iosapic_remove (unsigned int gsi_base);
-#else
-#define iosapic_remove(gsi_base) (-EINVAL)
-#endif /* CONFIG_HOTPLUG */
-extern int gsi_to_vector (unsigned int gsi);
-extern int gsi_to_irq (unsigned int gsi);
-extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
- unsigned long trigger);
-extern void iosapic_unregister_intr (unsigned int irq);
-extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
- unsigned long polarity,
- unsigned long trigger);
-extern int __init iosapic_register_platform_intr (u32 int_type,
- unsigned int gsi,
- int pmi_vector,
- u16 eid, u16 id,
- unsigned long polarity,
- unsigned long trigger);
-extern unsigned int iosapic_version (char __iomem *addr);
-
-#ifdef CONFIG_NUMA
-extern void __devinit map_iosapic_to_node (unsigned int, int);
-#endif
-#else
-#define iosapic_system_init(pcat_compat) do { } while (0)
-#define iosapic_init(address,gsi_base) (-EINVAL)
-#define iosapic_remove(gsi_base) (-ENODEV)
-#define iosapic_register_intr(gsi,polarity,trigger) (gsi)
-#define iosapic_unregister_intr(irq) do { } while (0)
-#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger) do { } while (0)
-#define iosapic_register_platform_intr(type,gsi,pmi,eid,id, \
- polarity,trigger) (gsi)
-#endif
-
-#ifdef XEN
-#define move_irq(x)
-
-#ifdef nop
-#undef nop
-#endif
-
-struct rte_entry {
- union {
- struct {
- u32 vector : 8,
- delivery_mode : 3,
- dest_mode : 1, /* always 0 for iosapic */
- delivery_status : 1,
- polarity : 1,
- __reserved0 : 1,
- trigger : 1,
- mask : 1,
- __reserved1 : 15;
- } lo;
- struct {
- u32 __reserved2 : 16,
- eid : 8,
- id : 8;
- } hi;
- u32 val;
- };
-};
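-
-/*
- * Illustrative helper, not part of the original header: composing the
- * low word of a masked, level-triggered, active-low RTE through the
- * union above (hypothetical policy values).
- */
-static inline u32 rte_entry_example_lo(u8 vector)
-{
-	struct rte_entry e;
-
-	e.val = 0;
-	e.lo.vector = vector;
-	e.lo.delivery_mode = IOSAPIC_LOWEST_PRIORITY;
-	e.lo.polarity = IOSAPIC_POL_LOW;
-	e.lo.trigger = IOSAPIC_LEVEL;
-	e.lo.mask = 1;
-
-	return e.val;
-}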
-
-#define IOSAPIC_RTEINDEX(reg) (((reg) - 0x10) >> 1)
-extern unsigned long ia64_vector_mask[];
-extern unsigned long ia64_xen_vector[];
-
-int iosapic_get_nr_iosapics(void);
-int iosapic_get_nr_pins(int index);
-#endif /* XEN */
-
-#define IO_APIC_BASE(idx) ((unsigned int *)iosapic_lists[idx].addr)
-#define IO_APIC_ID(idx) (iosapic_lists[idx].id)
-
-# endif /* !__ASSEMBLY__ */
-#endif /* __ASM_IA64_IOSAPIC_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/irq.h b/xen/include/asm-ia64/linux-xen/asm/irq.h
deleted file mode 100644
index 9ce12e6824..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/irq.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef _ASM_IA64_IRQ_H
-#define _ASM_IA64_IRQ_H
-
-/*
- * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * 11/24/98 S.Eranian updated TIMER_IRQ and irq_canonicalize
- * 01/20/99 S.Eranian added keyboard interrupt
- * 02/29/00 D.Mosberger moved most things into hw_irq.h
- */
-
-#define NR_VECTORS 256
-#define NR_IRQS 256
-
-#ifdef XEN
-#include <xen/hvm/irq.h>
-
-struct arch_irq_desc {
- int vector;
- unsigned int depth;
- cpumask_var_t cpu_mask;
-};
-
-struct arch_pirq {
- struct hvm_pirq_dpci dpci;
-};
-
-int init_irq_data(void);
-#endif
-
-static __inline__ int
-irq_canonicalize (int irq)
-{
- /*
- * We do the legacy thing here of pretending that irqs < 16
- * are 8259 irqs. This really shouldn't be necessary at all,
- * but we keep it here as serial.c still uses it...
- */
- return ((irq == 2) ? 9 : irq);
-}
-
-extern void disable_irq (unsigned int);
-extern void disable_irq_nosync (unsigned int);
-extern void enable_irq (unsigned int);
-extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
-
-#ifdef CONFIG_SMP
-extern void move_irq(int irq);
-#else
-#define move_irq(irq)
-#endif
-
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
-extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
-
-#ifdef XEN
-static inline unsigned int irq_to_vector(int);
-extern int setup_irq_vector(unsigned int, struct irqaction *);
-extern void release_irq_vector(unsigned int);
-extern int request_irq_vector(unsigned int vector,
- void (*handler)(int, void *, struct cpu_user_regs *),
- unsigned long irqflags, const char * devname, void *dev_id);
-
-#define create_irq(x) assign_irq_vector(AUTO_ASSIGN_IRQ)
-#define destroy_irq(x) free_irq_vector(x)
-
-/* no-op; evaluate x once (the original "do {} while(!x)" would spin for x == 0) */
-#define irq_complete_move(x) do { (void)(x); } while (0)
-
-#define domain_pirq_to_irq(d, irq) (irq) /* domain_irq_to_vector(d, irq) */
-
-#define hvm_domain_use_pirq(d, info) 0
-#endif
-
-#endif /* _ASM_IA64_IRQ_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/kregs.h b/xen/include/asm-ia64/linux-xen/asm/kregs.h
deleted file mode 100644
index 8e0795f0c8..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/kregs.h
+++ /dev/null
@@ -1,167 +0,0 @@
-#ifndef _ASM_IA64_KREGS_H
-#define _ASM_IA64_KREGS_H
-
-/*
- * Copyright (C) 2001-2002 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-/*
- * This file defines the kernel register usage convention used by Linux/ia64.
- */
-
-/*
- * Kernel registers:
- */
-#define IA64_KR_IO_BASE 0 /* ar.k0: legacy I/O base address */
-#define IA64_KR_TSSD 1 /* ar.k1: IVE uses this as the TSSD */
-#define IA64_KR_PER_CPU_DATA 3 /* ar.k3: physical per-CPU base */
-#define IA64_KR_CURRENT_STACK 4 /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
-#define IA64_KR_FPU_OWNER 5 /* ar.k5: fpu-owner (UP only, at the moment) */
-#define IA64_KR_CURRENT 6 /* ar.k6: "current" task pointer */
-#define IA64_KR_PT_BASE 7 /* ar.k7: page table base address (physical) */
-
-#define _IA64_KR_PASTE(x,y) x##y
-#define _IA64_KR_PREFIX(n) _IA64_KR_PASTE(ar.k, n)
-#define IA64_KR(n) _IA64_KR_PREFIX(IA64_KR_##n)
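-
-/*
- * Illustrative note, not part of the original header: the two-level
- * paste lets callers use the symbolic names above, e.g. IA64_KR(CURRENT)
- * expands through _IA64_KR_PREFIX(6) to the register name ar.k6.
- */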
-
-/*
- * Translation registers:
- */
-#define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel image (code & data) */
-#define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */
-#define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */
-#define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- & register-stacks */
-
-/* Processor status register bits: */
-#define IA64_PSR_BE_BIT 1
-#define IA64_PSR_UP_BIT 2
-#define IA64_PSR_AC_BIT 3
-#define IA64_PSR_MFL_BIT 4
-#define IA64_PSR_MFH_BIT 5
-#define IA64_PSR_IC_BIT 13
-#define IA64_PSR_I_BIT 14
-#define IA64_PSR_PK_BIT 15
-#define IA64_PSR_DT_BIT 17
-#define IA64_PSR_DFL_BIT 18
-#define IA64_PSR_DFH_BIT 19
-#define IA64_PSR_SP_BIT 20
-#define IA64_PSR_PP_BIT 21
-#define IA64_PSR_DI_BIT 22
-#define IA64_PSR_SI_BIT 23
-#define IA64_PSR_DB_BIT 24
-#define IA64_PSR_LP_BIT 25
-#define IA64_PSR_TB_BIT 26
-#define IA64_PSR_RT_BIT 27
-/* The following are not affected by save_flags()/restore_flags(): */
-#define IA64_PSR_CPL0_BIT 32
-#define IA64_PSR_CPL1_BIT 33
-#define IA64_PSR_IS_BIT 34
-#define IA64_PSR_MC_BIT 35
-#define IA64_PSR_IT_BIT 36
-#define IA64_PSR_ID_BIT 37
-#define IA64_PSR_DA_BIT 38
-#define IA64_PSR_DD_BIT 39
-#define IA64_PSR_SS_BIT 40
-#define IA64_PSR_RI_BIT 41
-#define IA64_PSR_ED_BIT 43
-#define IA64_PSR_BN_BIT 44
-#define IA64_PSR_IA_BIT 45
-
-/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
- execve(). Only list flags here that need to be cleared/set for BOTH clone2() and
- execve(). */
-#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
- IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
- IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)
-#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH | IA64_PSR_SP)
-
-#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT)
-#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT)
-#define IA64_PSR_AC (__IA64_UL(1) << IA64_PSR_AC_BIT)
-#define IA64_PSR_MFL (__IA64_UL(1) << IA64_PSR_MFL_BIT)
-#define IA64_PSR_MFH (__IA64_UL(1) << IA64_PSR_MFH_BIT)
-#define IA64_PSR_IC (__IA64_UL(1) << IA64_PSR_IC_BIT)
-#define IA64_PSR_I (__IA64_UL(1) << IA64_PSR_I_BIT)
-#define IA64_PSR_PK (__IA64_UL(1) << IA64_PSR_PK_BIT)
-#define IA64_PSR_DT (__IA64_UL(1) << IA64_PSR_DT_BIT)
-#define IA64_PSR_DFL (__IA64_UL(1) << IA64_PSR_DFL_BIT)
-#define IA64_PSR_DFH (__IA64_UL(1) << IA64_PSR_DFH_BIT)
-#define IA64_PSR_SP (__IA64_UL(1) << IA64_PSR_SP_BIT)
-#define IA64_PSR_PP (__IA64_UL(1) << IA64_PSR_PP_BIT)
-#define IA64_PSR_DI (__IA64_UL(1) << IA64_PSR_DI_BIT)
-#define IA64_PSR_SI (__IA64_UL(1) << IA64_PSR_SI_BIT)
-#define IA64_PSR_DB (__IA64_UL(1) << IA64_PSR_DB_BIT)
-#define IA64_PSR_LP (__IA64_UL(1) << IA64_PSR_LP_BIT)
-#define IA64_PSR_TB (__IA64_UL(1) << IA64_PSR_TB_BIT)
-#define IA64_PSR_RT (__IA64_UL(1) << IA64_PSR_RT_BIT)
-/* The following are not affected by save_flags()/restore_flags(): */
-#define IA64_PSR_CPL (__IA64_UL(3) << IA64_PSR_CPL0_BIT)
-#define IA64_PSR_IS (__IA64_UL(1) << IA64_PSR_IS_BIT)
-#define IA64_PSR_MC (__IA64_UL(1) << IA64_PSR_MC_BIT)
-#define IA64_PSR_IT (__IA64_UL(1) << IA64_PSR_IT_BIT)
-#define IA64_PSR_ID (__IA64_UL(1) << IA64_PSR_ID_BIT)
-#define IA64_PSR_DA (__IA64_UL(1) << IA64_PSR_DA_BIT)
-#define IA64_PSR_DD (__IA64_UL(1) << IA64_PSR_DD_BIT)
-#define IA64_PSR_SS (__IA64_UL(1) << IA64_PSR_SS_BIT)
-#define IA64_PSR_RI (__IA64_UL(3) << IA64_PSR_RI_BIT)
-#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT)
-#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT)
-#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT)
-
-/* User mask bits: */
-#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
-
-/* Default Control Register */
-#define IA64_DCR_PP_BIT 0 /* privileged performance monitor default */
-#define IA64_DCR_BE_BIT 1 /* big-endian default */
-#define IA64_DCR_LC_BIT 2 /* ia32 lock-check enable */
-#define IA64_DCR_DM_BIT 8 /* defer TLB miss faults */
-#define IA64_DCR_DP_BIT 9 /* defer page-not-present faults */
-#define IA64_DCR_DK_BIT 10 /* defer key miss faults */
-#define IA64_DCR_DX_BIT 11 /* defer key permission faults */
-#define IA64_DCR_DR_BIT 12 /* defer access right faults */
-#define IA64_DCR_DA_BIT 13 /* defer access bit faults */
-#define IA64_DCR_DD_BIT 14 /* defer debug faults */
-
-#define IA64_DCR_PP (__IA64_UL(1) << IA64_DCR_PP_BIT)
-#define IA64_DCR_BE (__IA64_UL(1) << IA64_DCR_BE_BIT)
-#define IA64_DCR_LC (__IA64_UL(1) << IA64_DCR_LC_BIT)
-#define IA64_DCR_DM (__IA64_UL(1) << IA64_DCR_DM_BIT)
-#define IA64_DCR_DP (__IA64_UL(1) << IA64_DCR_DP_BIT)
-#define IA64_DCR_DK (__IA64_UL(1) << IA64_DCR_DK_BIT)
-#define IA64_DCR_DX (__IA64_UL(1) << IA64_DCR_DX_BIT)
-#define IA64_DCR_DR (__IA64_UL(1) << IA64_DCR_DR_BIT)
-#define IA64_DCR_DA (__IA64_UL(1) << IA64_DCR_DA_BIT)
-#define IA64_DCR_DD (__IA64_UL(1) << IA64_DCR_DD_BIT)
-
-/* Interrupt Status Register */
-#define IA64_ISR_X_BIT 32 /* execute access */
-#define IA64_ISR_W_BIT 33 /* write access */
-#define IA64_ISR_R_BIT 34 /* read access */
-#define IA64_ISR_NA_BIT 35 /* non-access */
-#define IA64_ISR_SP_BIT 36 /* speculative load exception */
-#define IA64_ISR_RS_BIT 37 /* mandatory register-stack exception */
-#define IA64_ISR_IR_BIT 38 /* invalid register frame exception */
-#define IA64_ISR_CODE_MASK 0xf
-
-#define IA64_ISR_X (__IA64_UL(1) << IA64_ISR_X_BIT)
-#define IA64_ISR_W (__IA64_UL(1) << IA64_ISR_W_BIT)
-#define IA64_ISR_R (__IA64_UL(1) << IA64_ISR_R_BIT)
-#define IA64_ISR_NA (__IA64_UL(1) << IA64_ISR_NA_BIT)
-#define IA64_ISR_SP (__IA64_UL(1) << IA64_ISR_SP_BIT)
-#define IA64_ISR_RS (__IA64_UL(1) << IA64_ISR_RS_BIT)
-#define IA64_ISR_IR (__IA64_UL(1) << IA64_ISR_IR_BIT)
-
-/* ISR code field for non-access instructions */
-#define IA64_ISR_CODE_TPA 0
-#define IA64_ISR_CODE_FC 1
-#define IA64_ISR_CODE_PROBE 2
-#define IA64_ISR_CODE_TAK 3
-#define IA64_ISR_CODE_LFETCH 4
-#define IA64_ISR_CODE_PROBEF 5
-
-#ifdef XEN
-#include <asm/xenkregs.h>
-#endif
-
-#endif /* _ASM_IA64_KREGS_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/machvec.h b/xen/include/asm-ia64/linux-xen/asm/machvec.h
deleted file mode 100644
index 4822b98933..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/machvec.h
+++ /dev/null
@@ -1,573 +0,0 @@
-/*
- * Machine vector for IA-64.
- *
- * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
- * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
- * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-#ifndef _ASM_IA64_MACHVEC_H
-#define _ASM_IA64_MACHVEC_H
-
-#include <linux/config.h>
-#include <linux/types.h>
-
-/* forward declarations: */
-struct device;
-struct pt_regs;
-struct scatterlist;
-struct page;
-struct mm_struct;
-struct pci_bus;
-
-typedef void ia64_mv_setup_t (char **);
-typedef void ia64_mv_cpu_init_t (void);
-typedef void ia64_mv_irq_init_t (void);
-typedef void ia64_mv_send_ipi_t (int, int, int, int);
-typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
-typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
-typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
-typedef unsigned int ia64_mv_local_vector_to_irq (u8);
-typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
-typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
- u8 size);
-typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
- u8 size);
-typedef void ia64_mv_kernel_launch_event_t(void);
-
-/* DMA-mapping interface: */
-typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-/*
- * WARNING: The legacy I/O space is _architected_. Platforms are
- * expected to follow this architected model (see Section 10.7 in the
- * IA-64 Architecture Software Developer's Manual). Unfortunately,
- * some broken machines do not follow that model, which is why we have
- * to make the inX/outX operations part of the machine vector.
- * Platform designers should follow the architected model whenever
- * possible.
- */
-typedef unsigned int ia64_mv_inb_t (unsigned long);
-typedef unsigned int ia64_mv_inw_t (unsigned long);
-typedef unsigned int ia64_mv_inl_t (unsigned long);
-typedef void ia64_mv_outb_t (unsigned char, unsigned long);
-typedef void ia64_mv_outw_t (unsigned short, unsigned long);
-typedef void ia64_mv_outl_t (unsigned int, unsigned long);
-typedef void ia64_mv_mmiowb_t (void);
-typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
-typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
-typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
-typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
-typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
-typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
-typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
-typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
-
-static inline void
-machvec_noop (void)
-{
-}
-
-static inline void
-machvec_noop_mm (struct mm_struct *mm)
-{
-}
-
-#ifdef XEN
-#include <xen/lib.h>
-/*
- * These should never get called, they just fill out the machine
- * vectors and make the compiler happy.
- */
-static inline void*
-machvec_noop_dma_alloc_coherent (struct device *dev, size_t size,
- dma_addr_t *addr, int dir)
-{
- panic("%s() called", __FUNCTION__);
- return (void *)0;
-}
-
-static inline void
-machvec_noop_dma_free_coherent (struct device *dev, size_t size,
- void *vaddr, dma_addr_t handle)
-{
- panic("%s() called", __FUNCTION__);
-}
-
-static inline dma_addr_t
-machvec_noop_dma_map_single (struct device *dev, void *addr,
- size_t size, int dir)
-{
- panic("%s() called", __FUNCTION__);
- return (dma_addr_t)0;
-}
-
-static inline void
-machvec_noop_dma_unmap_single (struct device *dev, dma_addr_t vaddr,
- size_t size, int dir)
-{
- panic("%s() called", __FUNCTION__);
-}
-
-static inline int
-machvec_noop_dma_map_sg (struct device *dev, struct scatterlist *sglist,
- int nents, int dir)
-{
- panic("%s() called", __FUNCTION__);
- return 0;
-}
-
-static inline void
-machvec_noop_dma_unmap_sg (struct device *dev, struct scatterlist *sglist,
- int nents, int dir)
-{
- panic("%s() called", __FUNCTION__);
-}
-
-static inline void
-machvec_noop_dma_sync_single_for_cpu (struct device *dev, dma_addr_t vaddr,
- size_t size, int dir)
-{
- panic("%s() called", __FUNCTION__);
-}
-
-#define machvec_noop_dma_sync_single_for_device \
- machvec_noop_dma_sync_single_for_cpu
-
-static inline void
-machvec_noop_dma_sync_sg_for_cpu (struct device *dev,
- struct scatterlist *sglist,
- int nents, int dir)
-{
- panic("%s() called", __FUNCTION__);
-}
-
-#define machvec_noop_dma_sync_sg_for_device \
- machvec_noop_dma_sync_sg_for_cpu
-
-static inline int
-machvec_noop_dma_mapping_error (dma_addr_t dma_addr)
-{
- panic("%s() called", __FUNCTION__);
- return 1;
-}
-
-static inline int
-machvec_noop_dma_supported (struct device *dev, u64 mask)
-{
- panic("%s() called", __FUNCTION__);
- return 0;
-}
-
-static inline char*
-machvec_noop_pci_get_legacy_mem (struct pci_bus *bus)
-{
- panic("%s() called", __FUNCTION__);
- return 0;
-}
-
-static inline int
-machvec_noop_pci_legacy_read (struct pci_bus *bus, u16 port, u32 *val, u8 size)
-{
- panic("%s() called", __FUNCTION__);
- return 0;
-}
-
-static inline int
-machvec_noop_pci_legacy_write (struct pci_bus *bus, u16 port, u32 val, u8 size)
-{
- panic("%s() called", __FUNCTION__);
- return 0;
-}
-
-typedef int ia64_mv_fw_init_t (void *d, void *bp, void *tables);
-
-static inline int
-machvec_noop_platform_fw_init (void *d, void *bp, void *tables)
-{
- return 0;
-}
-
-#endif
-
-extern void machvec_setup (char **);
-extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
-extern void machvec_tlb_migrate_finish (struct mm_struct *);
-
-# if defined (CONFIG_IA64_HP_SIM)
-# include <asm/machvec_hpsim.h>
-# elif defined (CONFIG_IA64_DIG)
-# include <asm/machvec_dig.h>
-# elif defined (CONFIG_IA64_HP_ZX1)
-# include <asm/machvec_hpzx1.h>
-# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
-# include <asm/machvec_hpzx1_swiotlb.h>
-# elif defined (CONFIG_IA64_SGI_SN2)
-# include <asm/machvec_sn2.h>
-# elif defined (CONFIG_IA64_GENERIC)
-
-# ifdef MACHVEC_PLATFORM_HEADER
-# include MACHVEC_PLATFORM_HEADER
-# else
-# define platform_name ia64_mv.name
-# define platform_setup ia64_mv.setup
-# define platform_cpu_init ia64_mv.cpu_init
-# define platform_irq_init ia64_mv.irq_init
-# define platform_send_ipi ia64_mv.send_ipi
-# define platform_timer_interrupt ia64_mv.timer_interrupt
-# define platform_global_tlb_purge ia64_mv.global_tlb_purge
-# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
-# define platform_dma_init ia64_mv.dma_init
-# define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent
-# define platform_dma_free_coherent ia64_mv.dma_free_coherent
-# define platform_dma_map_single ia64_mv.dma_map_single
-# define platform_dma_unmap_single ia64_mv.dma_unmap_single
-# define platform_dma_map_sg ia64_mv.dma_map_sg
-# define platform_dma_unmap_sg ia64_mv.dma_unmap_sg
-# define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
-# define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu
-# define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
-# define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
-# define platform_dma_mapping_error ia64_mv.dma_mapping_error
-# define platform_dma_supported ia64_mv.dma_supported
-# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
-# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
-# define platform_pci_legacy_read ia64_mv.pci_legacy_read
-# define platform_pci_legacy_write ia64_mv.pci_legacy_write
-# define platform_inb ia64_mv.inb
-# define platform_inw ia64_mv.inw
-# define platform_inl ia64_mv.inl
-# define platform_outb ia64_mv.outb
-# define platform_outw ia64_mv.outw
-# define platform_outl ia64_mv.outl
-# define platform_mmiowb ia64_mv.mmiowb
-# define platform_readb ia64_mv.readb
-# define platform_readw ia64_mv.readw
-# define platform_readl ia64_mv.readl
-# define platform_readq ia64_mv.readq
-# define platform_readb_relaxed ia64_mv.readb_relaxed
-# define platform_readw_relaxed ia64_mv.readw_relaxed
-# define platform_readl_relaxed ia64_mv.readl_relaxed
-# define platform_readq_relaxed ia64_mv.readq_relaxed
-# define platform_kernel_launch_event ia64_mv.kernel_launch_event
-#ifdef XEN
-# define platform_fw_init ia64_mv.fw_init
-#endif
-# endif
-
-/* __attribute__((__aligned__(16))) is required to make the size of the
- * structure a multiple of 16 bytes.
- * This fills up the holes created because of section 3.3.1 in the
- * Software Conventions guide.
- */
-struct ia64_machine_vector {
- const char *name;
- ia64_mv_setup_t *setup;
- ia64_mv_cpu_init_t *cpu_init;
- ia64_mv_irq_init_t *irq_init;
- ia64_mv_send_ipi_t *send_ipi;
- ia64_mv_timer_interrupt_t *timer_interrupt;
- ia64_mv_global_tlb_purge_t *global_tlb_purge;
- ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
- ia64_mv_dma_init *dma_init;
- ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
- ia64_mv_dma_free_coherent *dma_free_coherent;
- ia64_mv_dma_map_single *dma_map_single;
- ia64_mv_dma_unmap_single *dma_unmap_single;
- ia64_mv_dma_map_sg *dma_map_sg;
- ia64_mv_dma_unmap_sg *dma_unmap_sg;
- ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
- ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
- ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
- ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
- ia64_mv_dma_mapping_error *dma_mapping_error;
- ia64_mv_dma_supported *dma_supported;
- ia64_mv_local_vector_to_irq *local_vector_to_irq;
- ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
- ia64_mv_pci_legacy_read_t *pci_legacy_read;
- ia64_mv_pci_legacy_write_t *pci_legacy_write;
- ia64_mv_inb_t *inb;
- ia64_mv_inw_t *inw;
- ia64_mv_inl_t *inl;
- ia64_mv_outb_t *outb;
- ia64_mv_outw_t *outw;
- ia64_mv_outl_t *outl;
- ia64_mv_mmiowb_t *mmiowb;
- ia64_mv_readb_t *readb;
- ia64_mv_readw_t *readw;
- ia64_mv_readl_t *readl;
- ia64_mv_readq_t *readq;
- ia64_mv_readb_relaxed_t *readb_relaxed;
- ia64_mv_readw_relaxed_t *readw_relaxed;
- ia64_mv_readl_relaxed_t *readl_relaxed;
- ia64_mv_readq_relaxed_t *readq_relaxed;
- ia64_mv_kernel_launch_event_t *kernel_launch_event;
-#ifdef XEN
- ia64_mv_fw_init_t *fw_init;
-#endif
-} __attribute__((__aligned__(16))); /* align attrib? see above comment */
-
-#ifdef XEN
-#define MACHVEC_INIT(name) \
-{ \
- #name, \
- platform_setup, \
- platform_cpu_init, \
- platform_irq_init, \
- platform_send_ipi, \
- platform_timer_interrupt, \
- platform_global_tlb_purge, \
- platform_tlb_migrate_finish, \
- platform_dma_init, \
- platform_dma_alloc_coherent, \
- platform_dma_free_coherent, \
- platform_dma_map_single, \
- platform_dma_unmap_single, \
- platform_dma_map_sg, \
- platform_dma_unmap_sg, \
- platform_dma_sync_single_for_cpu, \
- platform_dma_sync_sg_for_cpu, \
- platform_dma_sync_single_for_device, \
- platform_dma_sync_sg_for_device, \
- platform_dma_mapping_error, \
- platform_dma_supported, \
- platform_local_vector_to_irq, \
- platform_pci_get_legacy_mem, \
- platform_pci_legacy_read, \
- platform_pci_legacy_write, \
- platform_inb, \
- platform_inw, \
- platform_inl, \
- platform_outb, \
- platform_outw, \
- platform_outl, \
- platform_mmiowb, \
- platform_readb, \
- platform_readw, \
- platform_readl, \
- platform_readq, \
- platform_readb_relaxed, \
- platform_readw_relaxed, \
- platform_readl_relaxed, \
- platform_readq_relaxed, \
- platform_kernel_launch_event, \
- platform_fw_init, \
-}
-#else
-#define MACHVEC_INIT(name) \
-{ \
- #name, \
- platform_setup, \
- platform_cpu_init, \
- platform_irq_init, \
- platform_send_ipi, \
- platform_timer_interrupt, \
- platform_global_tlb_purge, \
- platform_tlb_migrate_finish, \
- platform_dma_init, \
- platform_dma_alloc_coherent, \
- platform_dma_free_coherent, \
- platform_dma_map_single, \
- platform_dma_unmap_single, \
- platform_dma_map_sg, \
- platform_dma_unmap_sg, \
- platform_dma_sync_single_for_cpu, \
- platform_dma_sync_sg_for_cpu, \
- platform_dma_sync_single_for_device, \
- platform_dma_sync_sg_for_device, \
- platform_dma_mapping_error, \
- platform_dma_supported, \
- platform_local_vector_to_irq, \
- platform_pci_get_legacy_mem, \
- platform_pci_legacy_read, \
- platform_pci_legacy_write, \
- platform_inb, \
- platform_inw, \
- platform_inl, \
- platform_outb, \
- platform_outw, \
- platform_outl, \
- platform_mmiowb, \
- platform_readb, \
- platform_readw, \
- platform_readl, \
- platform_readq, \
- platform_readb_relaxed, \
- platform_readw_relaxed, \
- platform_readl_relaxed, \
- platform_readq_relaxed, \
- platform_kernel_launch_event \
-}
-#endif
-
-extern struct ia64_machine_vector ia64_mv;
-extern void machvec_init (const char *name);
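-
-/*
- * Illustrative note, not part of the original header: in a generic
- * build each platform_* macro above resolves to a slot in ia64_mv, so
- * one binary dispatches at run time, e.g.
- *
- *	machvec_init("dig");		locate and install the "dig" vector
- *	platform_inb(0x60);		expands to ia64_mv.inb(0x60)
- *
- * whereas a platform-specific build binds platform_inb at compile time
- * through one of the machvec_*.h headers included above.
- */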
-
-# else
-# error Unknown configuration. Update asm-ia64/machvec.h.
-# endif /* CONFIG_IA64_GENERIC */
-
-/*
- * Declare default routines which aren't declared anywhere else:
- */
-extern ia64_mv_dma_init swiotlb_init;
-extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
-extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
-extern ia64_mv_dma_map_single swiotlb_map_single;
-extern ia64_mv_dma_unmap_single swiotlb_unmap_single;
-extern ia64_mv_dma_map_sg swiotlb_map_sg;
-extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg;
-extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device swiotlb_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
-extern ia64_mv_dma_supported swiotlb_dma_supported;
-
-/*
- * Define default versions so we can extend machvec for new platforms without having
- * to update the machvec files for all existing platforms.
- */
-#ifndef platform_setup
-# define platform_setup machvec_setup
-#endif
-#ifndef platform_cpu_init
-# define platform_cpu_init machvec_noop
-#endif
-#ifndef platform_irq_init
-# define platform_irq_init machvec_noop
-#endif
-
-#ifndef platform_send_ipi
-# define platform_send_ipi ia64_send_ipi /* default to architected version */
-#endif
-#ifndef platform_timer_interrupt
-# define platform_timer_interrupt machvec_timer_interrupt
-#endif
-#ifndef platform_global_tlb_purge
-# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */
-#endif
-#ifndef platform_tlb_migrate_finish
-# define platform_tlb_migrate_finish machvec_noop_mm
-#endif
-#ifndef platform_kernel_launch_event
-# define platform_kernel_launch_event machvec_noop
-#endif
-#ifndef platform_dma_init
-# define platform_dma_init swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single
-# define platform_dma_map_single swiotlb_map_single
-#endif
-#ifndef platform_dma_unmap_single
-# define platform_dma_unmap_single swiotlb_unmap_single
-#endif
-#ifndef platform_dma_map_sg
-# define platform_dma_map_sg swiotlb_map_sg
-#endif
-#ifndef platform_dma_unmap_sg
-# define platform_dma_unmap_sg swiotlb_unmap_sg
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error swiotlb_dma_mapping_error
-#endif
-#ifndef platform_dma_supported
-# define platform_dma_supported swiotlb_dma_supported
-#endif
-#ifndef platform_local_vector_to_irq
-# define platform_local_vector_to_irq __ia64_local_vector_to_irq
-#endif
-#ifndef platform_pci_get_legacy_mem
-# define platform_pci_get_legacy_mem ia64_pci_get_legacy_mem
-#endif
-#ifndef platform_pci_legacy_read
-# define platform_pci_legacy_read ia64_pci_legacy_read
-#endif
-#ifndef platform_pci_legacy_write
-# define platform_pci_legacy_write ia64_pci_legacy_write
-#endif
-#ifndef platform_inb
-# define platform_inb __ia64_inb
-#endif
-#ifndef platform_inw
-# define platform_inw __ia64_inw
-#endif
-#ifndef platform_inl
-# define platform_inl __ia64_inl
-#endif
-#ifndef platform_outb
-# define platform_outb __ia64_outb
-#endif
-#ifndef platform_outw
-# define platform_outw __ia64_outw
-#endif
-#ifndef platform_outl
-# define platform_outl __ia64_outl
-#endif
-#ifndef platform_mmiowb
-# define platform_mmiowb __ia64_mmiowb
-#endif
-#ifndef platform_readb
-# define platform_readb __ia64_readb
-#endif
-#ifndef platform_readw
-# define platform_readw __ia64_readw
-#endif
-#ifndef platform_readl
-# define platform_readl __ia64_readl
-#endif
-#ifndef platform_readq
-# define platform_readq __ia64_readq
-#endif
-#ifndef platform_readb_relaxed
-# define platform_readb_relaxed __ia64_readb_relaxed
-#endif
-#ifndef platform_readw_relaxed
-# define platform_readw_relaxed __ia64_readw_relaxed
-#endif
-#ifndef platform_readl_relaxed
-# define platform_readl_relaxed __ia64_readl_relaxed
-#endif
-#ifndef platform_readq_relaxed
-# define platform_readq_relaxed __ia64_readq_relaxed
-#endif
-#ifdef XEN
-#ifndef platform_fw_init
-# define platform_fw_init machvec_noop_platform_fw_init
-#endif
-#endif
-
-#endif /* _ASM_IA64_MACHVEC_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h b/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h
deleted file mode 100644
index a8b658720f..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _ASM_IA64_MACHVEC_DIG_h
-#define _ASM_IA64_MACHVEC_DIG_h
-
-extern ia64_mv_setup_t dig_setup;
-
-/*
- * This stuff has dual use!
- *
- * For a generic kernel, the macros are used to initialize the
- * platform's machvec structure. When compiling a non-generic kernel,
- * the macros are used directly.
- */
-#define platform_name "dig"
-#ifdef XEN
-/*
- * All the World is a PC .... yay! yay! yay!
- */
-extern ia64_mv_setup_t hpsim_setup;
-#define platform_setup hpsim_setup
-
-#define platform_dma_init machvec_noop
-#define platform_dma_alloc_coherent machvec_noop_dma_alloc_coherent
-#define platform_dma_free_coherent machvec_noop_dma_free_coherent
-#define platform_dma_map_single machvec_noop_dma_map_single
-#define platform_dma_unmap_single machvec_noop_dma_unmap_single
-#define platform_dma_map_sg machvec_noop_dma_map_sg
-#define platform_dma_unmap_sg machvec_noop_dma_unmap_sg
-#define platform_dma_sync_single_for_cpu \
- machvec_noop_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu \
- machvec_noop_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device \
- machvec_noop_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device \
- machvec_noop_dma_sync_sg_for_device
-#define platform_dma_mapping_error machvec_noop_dma_mapping_error
-#define platform_dma_supported machvec_noop_dma_supported
-
-#define platform_pci_get_legacy_mem machvec_noop_pci_get_legacy_mem
-#define platform_pci_legacy_read machvec_noop_pci_legacy_read
-#define platform_pci_legacy_write machvec_noop_pci_legacy_write
-#else
-#define platform_setup dig_setup
-#endif
-
-#endif /* _ASM_IA64_MACHVEC_DIG_h */
diff --git a/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h b/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h
deleted file mode 100644
index 96a85ce2ce..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef _ASM_IA64_MACHVEC_HPZX1_h
-#define _ASM_IA64_MACHVEC_HPZX1_h
-
-extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent sba_free_coherent;
-extern ia64_mv_dma_map_single sba_map_single;
-extern ia64_mv_dma_unmap_single sba_unmap_single;
-extern ia64_mv_dma_map_sg sba_map_sg;
-extern ia64_mv_dma_unmap_sg sba_unmap_sg;
-extern ia64_mv_dma_supported sba_dma_supported;
-extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
-
-/*
- * This stuff has dual use!
- *
- * For a generic kernel, the macros are used to initialize the
- * platform's machvec structure. When compiling a non-generic kernel,
- * the macros are used directly.
- */
-#define platform_name "hpzx1"
-#ifdef XEN
-extern ia64_mv_setup_t hpsim_setup;
-extern ia64_mv_irq_init_t hpsim_irq_init;
-#define platform_setup hpsim_setup
-#define platform_irq_init hpsim_irq_init
-
-#define platform_dma_init machvec_noop
-#define platform_dma_alloc_coherent machvec_noop_dma_alloc_coherent
-#define platform_dma_free_coherent machvec_noop_dma_free_coherent
-#define platform_dma_map_single machvec_noop_dma_map_single
-#define platform_dma_unmap_single machvec_noop_dma_unmap_single
-#define platform_dma_map_sg machvec_noop_dma_map_sg
-#define platform_dma_unmap_sg machvec_noop_dma_unmap_sg
-#define platform_dma_sync_single_for_cpu \
- machvec_noop_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu \
- machvec_noop_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device \
- machvec_noop_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device \
- machvec_noop_dma_sync_sg_for_device
-#define platform_dma_mapping_error machvec_noop_dma_mapping_error
-#define platform_dma_supported machvec_noop_dma_supported
-
-#define platform_pci_get_legacy_mem machvec_noop_pci_get_legacy_mem
-#define platform_pci_legacy_read machvec_noop_pci_legacy_read
-#define platform_pci_legacy_write machvec_noop_pci_legacy_write
-#else
-#define platform_setup dig_setup
-#define platform_dma_init machvec_noop
-#define platform_dma_alloc_coherent sba_alloc_coherent
-#define platform_dma_free_coherent sba_free_coherent
-#define platform_dma_map_single sba_map_single
-#define platform_dma_unmap_single sba_unmap_single
-#define platform_dma_map_sg sba_map_sg
-#define platform_dma_unmap_sg sba_unmap_sg
-#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
-#define platform_dma_supported sba_dma_supported
-#define platform_dma_mapping_error sba_dma_mapping_error
-#endif
-
-#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h b/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h
deleted file mode 100644
index 0574a85c3c..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#ifndef _ASM_IA64_MACHVEC_SN2_H
-#define _ASM_IA64_MACHVEC_SN2_H
-
-extern ia64_mv_setup_t sn_setup;
-extern ia64_mv_cpu_init_t sn_cpu_init;
-extern ia64_mv_irq_init_t sn_irq_init;
-extern ia64_mv_send_ipi_t sn2_send_IPI;
-extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
-extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
-extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
-extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
-extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
-extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
-extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
-extern ia64_mv_inb_t __sn_inb;
-extern ia64_mv_inw_t __sn_inw;
-extern ia64_mv_inl_t __sn_inl;
-extern ia64_mv_outb_t __sn_outb;
-extern ia64_mv_outw_t __sn_outw;
-extern ia64_mv_outl_t __sn_outl;
-extern ia64_mv_mmiowb_t __sn_mmiowb;
-extern ia64_mv_readb_t __sn_readb;
-extern ia64_mv_readw_t __sn_readw;
-extern ia64_mv_readl_t __sn_readl;
-extern ia64_mv_readq_t __sn_readq;
-extern ia64_mv_readb_t __sn_readb_relaxed;
-extern ia64_mv_readw_t __sn_readw_relaxed;
-extern ia64_mv_readl_t __sn_readl_relaxed;
-extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent;
-extern ia64_mv_dma_free_coherent sn_dma_free_coherent;
-extern ia64_mv_dma_map_single sn_dma_map_single;
-extern ia64_mv_dma_unmap_single sn_dma_unmap_single;
-extern ia64_mv_dma_map_sg sn_dma_map_sg;
-extern ia64_mv_dma_unmap_sg sn_dma_unmap_sg;
-extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
-extern ia64_mv_dma_supported sn_dma_supported;
-#ifndef XEN
-extern ia64_mv_migrate_t sn_migrate;
-extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
-extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
-#endif
-#ifdef XEN
-extern ia64_mv_fw_init_t sn2_dom_fw_init;
-#endif
-
-
-/*
- * This stuff has dual use!
- *
- * For a generic kernel, the macros are used to initialize the
- * platform's machvec structure. When compiling a non-generic kernel,
- * the macros are used directly.
- */
-#define platform_name "sn2"
-#define platform_setup sn_setup
-#define platform_cpu_init sn_cpu_init
-#define platform_irq_init sn_irq_init
-#define platform_send_ipi sn2_send_IPI
-#ifndef XEN
-#define platform_timer_interrupt sn_timer_interrupt
-#endif
-#define platform_global_tlb_purge sn2_global_tlb_purge
-#ifndef XEN
-#define platform_tlb_migrate_finish sn_tlb_migrate_finish
-#endif
-#define platform_pci_fixup sn_pci_fixup
-#define platform_inb __sn_inb
-#define platform_inw __sn_inw
-#define platform_inl __sn_inl
-#define platform_outb __sn_outb
-#define platform_outw __sn_outw
-#define platform_outl __sn_outl
-#define platform_mmiowb __sn_mmiowb
-#define platform_readb __sn_readb
-#define platform_readw __sn_readw
-#define platform_readl __sn_readl
-#define platform_readq __sn_readq
-#define platform_readb_relaxed __sn_readb_relaxed
-#define platform_readw_relaxed __sn_readw_relaxed
-#define platform_readl_relaxed __sn_readl_relaxed
-#define platform_readq_relaxed __sn_readq_relaxed
-#define platform_local_vector_to_irq sn_local_vector_to_irq
-#ifdef XEN
-#define platform_pci_get_legacy_mem machvec_noop_pci_get_legacy_mem
-#define platform_pci_legacy_read machvec_noop_pci_legacy_read
-#define platform_pci_legacy_write machvec_noop_pci_legacy_write
-#else
-#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
-#define platform_pci_legacy_read sn_pci_legacy_read
-#define platform_pci_legacy_write sn_pci_legacy_write
-#endif
-#define platform_dma_init machvec_noop
-#ifdef XEN
-#define platform_dma_alloc_coherent machvec_noop_dma_alloc_coherent
-#define platform_dma_free_coherent machvec_noop_dma_free_coherent
-#define platform_dma_map_single machvec_noop_dma_map_single
-#define platform_dma_unmap_single machvec_noop_dma_unmap_single
-#define platform_dma_map_sg machvec_noop_dma_map_sg
-#define platform_dma_unmap_sg machvec_noop_dma_unmap_sg
-#define platform_dma_sync_single_for_cpu \
- machvec_noop_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu \
- machvec_noop_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device \
- machvec_noop_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device machvec_noop_dma_sync_sg_for_device
-#define platform_dma_mapping_error machvec_noop_dma_mapping_error
-#define platform_dma_supported machvec_noop_dma_supported
-#else
-#define platform_dma_alloc_coherent sn_dma_alloc_coherent
-#define platform_dma_free_coherent sn_dma_free_coherent
-#define platform_dma_map_single sn_dma_map_single
-#define platform_dma_unmap_single sn_dma_unmap_single
-#define platform_dma_map_sg sn_dma_map_sg
-#define platform_dma_unmap_sg sn_dma_unmap_sg
-#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
-#define platform_dma_mapping_error sn_dma_mapping_error
-#define platform_dma_supported sn_dma_supported
-#define platform_migrate sn_migrate
-#endif
-
-#ifndef XEN
-#ifdef CONFIG_PCI_MSI
-#define platform_setup_msi_irq sn_setup_msi_irq
-#define platform_teardown_msi_irq sn_teardown_msi_irq
-#else
-#define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
-#define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
-#endif
-#endif
-
-#ifdef XEN
-#define platform_fw_init sn2_dom_fw_init
-#endif
-
-#include <asm/sn/io.h>
-
-#endif /* _ASM_IA64_MACHVEC_SN2_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/mca_asm.h b/xen/include/asm-ia64/linux-xen/asm/mca_asm.h
deleted file mode 100644
index c5d7c75f73..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/mca_asm.h
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * File: mca_asm.h
- *
- * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
- * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
- * Copyright (C) 2000 Hewlett-Packard Co.
- * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2002 Intel Corp.
- * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
- */
-#ifndef _ASM_IA64_MCA_ASM_H
-#define _ASM_IA64_MCA_ASM_H
-
-#define PSR_IC 13
-#define PSR_I 14
-#define PSR_DT 17
-#define PSR_RT 27
-#define PSR_MC 35
-#define PSR_IT 36
-#define PSR_BN 44
-
-/*
- * This macro converts an instruction virtual address to a physical address.
- * Right now, for simulation purposes, the virtual addresses are
- * direct mapped to physical addresses.
- * 1. Lop off the region bits in the virtual address: bits 61 thru 63
- *    in the Linux variant, bits 60 thru 63 in the Xen variant below.
- */
-#ifdef XEN
-#define INST_VA_TO_PA(addr) \
- dep addr = 0, addr, 60, 4
-#else
-#define INST_VA_TO_PA(addr) \
- dep addr = 0, addr, 61, 3
-#endif
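-
-/*
- * Illustrative note, not part of the original header: the Xen variant
- * clears the top four bits (Xen maps itself with 0xf there, see
- * DATA_PA_TO_VA below), while the Linux variant clears the three
- * region bits, e.g.
- *
- *	0xf000000000123456  ->  0x0000000000123456	(Xen, bits 60..63)
- *	0xe000000000123456  ->  0x0000000000123456	(Linux, region 7)
- */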
-/*
- * This macro converts a data virtual address to a physical address.
- * Unlike the instruction variant above, it simply uses the tpa
- * instruction, which performs the translation through the TLB.
- */
-#define DATA_VA_TO_PA(addr) \
- tpa addr = addr
-/*
- * This macro converts a data physical address to a virtual address.
- * Right now, for simulation purposes, the virtual addresses are
- * direct mapped to physical addresses.
- * 1. Put 0x7 in bits 61 thru 63 (0xf in bits 60 thru 63 in the
- *    Xen variant below).
- */
-#ifdef XEN
-#define DATA_PA_TO_VA(addr,temp) \
- mov temp = 0xf ;; \
- dep addr = temp, addr, 60, 4
-#else
-#define DATA_PA_TO_VA(addr,temp) \
- mov temp = 0x7 ;; \
- dep addr = temp, addr, 61, 3
-#endif
-
-#ifdef XEN
-/*
- * void set_per_cpu_data(u64 *ret)
- * {
- *	int i;
- *	for (i = 0; i < NR_CPUS; i++) {
- * if (ia64_mca_tlb_list[i].cr_lid == ia64_getreg(_IA64_REG_CR_LID)) {
- * *ret = ia64_mca_tlb_list[i].percpu_paddr;
- * return;
- * }
- * }
- * while(1); // Endless loop on error
- * }
- */
-#define SET_PER_CPU_DATA(reg,_tmp1,_tmp2,_tmp3) \
- LOAD_PHYSICAL(p0,reg,ia64_mca_tlb_list);;\
- mov _tmp1 = ar.lc;; \
- mov ar.lc = NR_CPUS-1; \
- mov _tmp2 = cr.lid;; \
-10: ld8 _tmp3 = [reg],16;; \
- cmp.ne p6, p7 = _tmp3, _tmp2;; \
-(p7) br.cond.dpnt 30f;; \
- br.cloop.sptk.few 10b;; \
-20: br 20b;;/* Endless loop on error */ \
-30: mov ar.lc = _tmp1; \
- adds reg = IA64_MCA_PERCPU_OFFSET-IA64_MCA_TLB_INFO_SIZE, reg;; \
- ld8 reg = [reg]
-
-#define GET_THIS_PADDR(reg, var) \
- SET_PER_CPU_DATA(reg,r5,r6,r7);; \
- addl reg = THIS_CPU(var) - PERCPU_ADDR, reg
-#else
-#define GET_THIS_PADDR(reg, var) \
- mov reg = IA64_KR(PER_CPU_DATA);; \
- addl reg = THIS_CPU(var), reg
-#endif
-
-/*
- * This macro jumps to the instruction at the given virtual address
- * and starts execution in physical mode with all the address
- * translations turned off.
- * 1. Save the current psr
- * 2. Make sure that all the upper 32 bits are off
- *
- * 3. Clear the interrupt enable and interrupt state collection bits
- * in the psr before updating the ipsr and iip.
- *
- * 4. Turn off the instruction, data and rse translation bits of the psr
- *    and store the new value into ipsr.
- *    Also make sure that the interrupts are disabled.
- *    Ensure that we are in little endian mode.
- *    [psr.{rt, it, dt, i, be} = 0]
- *
- * 5. Get the physical address corresponding to the virtual address
- *    of the next instruction bundle and put it in iip.
- *    (Using magic numbers 24 and 40 in the deposit instruction, since
- *     the IA64_SDK code directly maps the lower 24 bits of a virtual
- *     address as the physical address).
- *
- * 6. Do an rfi to move the values from ipsr to psr and iip to ip.
- */
-#define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
- mov old_psr = psr; \
- ;; \
- dep old_psr = 0, old_psr, 32, 32; \
- \
- mov ar.rsc = 0 ; \
- ;; \
- srlz.d; \
- mov temp2 = ar.bspstore; \
- ;; \
- DATA_VA_TO_PA(temp2); \
- ;; \
- mov temp1 = ar.rnat; \
- ;; \
- mov ar.bspstore = temp2; \
- ;; \
- mov ar.rnat = temp1; \
- mov temp1 = psr; \
- mov temp2 = psr; \
- ;; \
- \
- dep temp2 = 0, temp2, PSR_IC, 2; \
- ;; \
- mov psr.l = temp2; \
- ;; \
- srlz.d; \
- dep temp1 = 0, temp1, 32, 32; \
- ;; \
- dep temp1 = 0, temp1, PSR_IT, 1; \
- ;; \
- dep temp1 = 0, temp1, PSR_DT, 1; \
- ;; \
- dep temp1 = 0, temp1, PSR_RT, 1; \
- ;; \
- dep temp1 = 0, temp1, PSR_I, 1; \
- ;; \
- dep temp1 = 0, temp1, PSR_IC, 1; \
- ;; \
- dep temp1 = -1, temp1, PSR_MC, 1; \
- ;; \
- mov cr.ipsr = temp1; \
- ;; \
- LOAD_PHYSICAL(p0, temp2, start_addr); \
- ;; \
- mov cr.iip = temp2; \
- mov cr.ifs = r0; \
- DATA_VA_TO_PA(sp); \
- DATA_VA_TO_PA(gp); \
- ;; \
- srlz.i; \
- ;; \
- nop 1; \
- nop 2; \
- nop 1; \
- nop 2; \
- rfi; \
- ;;
-
-/*
- * This macro jumps to the instruction at the given virtual address
- * and starts execution in virtual mode with all the address
- * translations turned on.
- * 1. Get the old saved psr
- *
- * 2. Clear the interrupt state collection bit in the current psr.
- *
- * 3. Set the instruction translation bit back in the old psr
- *	Note we have to do this since right now we are saving only the
- *	lower 32 bits of the old psr. (Also the old psr has the data and
- *	rse translation bits on.)
- *
- * 4. Set ipsr to this old_psr with "it" bit set and "bn" = 1.
- *
- * 5. Reset the current thread pointer (r13).
- *
- * 6. Set iip to the virtual address of the next instruction bundle.
- *
- * 7. Do an rfi to move ipsr to psr and iip to ip.
- */
-
-#ifdef XEN
-#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
- mov temp2 = psr; \
- ;; \
- mov old_psr = temp2; \
- ;; \
- dep temp2 = 0, temp2, PSR_IC, 2; \
- ;; \
- mov psr.l = temp2; \
- mov ar.rsc = 0; \
- ;; \
- srlz.d; \
- mov r13 = ar.k6; \
- mov temp2 = ar.bspstore; \
- ;; \
- DATA_PA_TO_VA(temp2,temp1); \
- ;; \
- mov temp1 = ar.rnat; \
- ;; \
- mov ar.bspstore = temp2; \
- ;; \
- mov ar.rnat = temp1; \
- ;; \
- mov temp1 = old_psr; \
- ;; \
- mov temp2 = 1; \
- ;; \
- dep temp1 = temp2, temp1, PSR_IC, 1; \
- ;; \
- dep temp1 = temp2, temp1, PSR_IT, 1; \
- ;; \
- dep temp1 = temp2, temp1, PSR_DT, 1; \
- ;; \
- dep temp1 = temp2, temp1, PSR_RT, 1; \
- ;; \
- dep temp1 = temp2, temp1, PSR_BN, 1; \
- ;; \
- \
- mov cr.ipsr = temp1; \
- movl temp2 = start_addr; \
- ;; \
- mov cr.iip = temp2; \
- movl gp = __gp; \
- ;; \
- DATA_PA_TO_VA(sp, temp1); \
- srlz.i; \
- ;; \
- nop 1; \
- nop 2; \
- nop 1; \
- rfi \
- ;;
-#else
-#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
- mov temp2 = psr; \
- ;; \
- mov old_psr = temp2; \
- ;; \
- dep temp2 = 0, temp2, PSR_IC, 2; \
- ;; \
- mov psr.l = temp2; \
- mov ar.rsc = 0; \
- ;; \
- srlz.d; \
- mov r13 = ar.k6; \
- mov temp2 = ar.bspstore; \
- ;; \
- DATA_PA_TO_VA(temp2,temp1); \
- ;; \
- mov temp1 = ar.rnat; \
- ;; \
- mov ar.bspstore = temp2; \
- ;; \
- mov ar.rnat = temp1; \
- ;; \
- mov temp1 = old_psr; \
- ;; \
- mov temp2 = 1; \
- ;; \
- dep temp1 = temp2, temp1, PSR_IC, 1; \
- ;; \
- dep temp1 = temp2, temp1, PSR_IT, 1; \
- ;; \
- dep temp1 = temp2, temp1, PSR_DT, 1; \
- ;; \
- dep temp1 = temp2, temp1, PSR_RT, 1; \
- ;; \
- dep temp1 = temp2, temp1, PSR_BN, 1; \
- ;; \
- \
- mov cr.ipsr = temp1; \
- movl temp2 = start_addr; \
- ;; \
- mov cr.iip = temp2; \
- ;; \
- DATA_PA_TO_VA(sp, temp1); \
- DATA_PA_TO_VA(gp, temp2); \
- srlz.i; \
- ;; \
- nop 1; \
- nop 2; \
- nop 1; \
- rfi \
- ;;
-#endif
-
-/*
- * The following offsets capture the order in which the
- * RSE related registers from the old context are
- * saved onto the new stack frame.
- *
- * +-----------------------+
- * |NDIRTY [BSP - BSPSTORE]|
- * +-----------------------+
- * | RNAT |
- * +-----------------------+
- * | BSPSTORE |
- * +-----------------------+
- * | IFS |
- * +-----------------------+
- * | PFS |
- * +-----------------------+
- * | RSC |
- * +-----------------------+ <-------- Bottom of new stack frame
- */
-#define rse_rsc_offset 0
-#define rse_pfs_offset (rse_rsc_offset+0x08)
-#define rse_ifs_offset (rse_pfs_offset+0x08)
-#define rse_bspstore_offset (rse_ifs_offset+0x08)
-#define rse_rnat_offset (rse_bspstore_offset+0x08)
-#define rse_ndirty_offset (rse_rnat_offset+0x08)
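
The same layout as a C struct, purely for illustration (the assembly addresses the frame through the byte offsets above, not through a struct):

    struct rse_save_frame {                 /* hypothetical */
            uint64_t rsc;                   /* rse_rsc_offset      == 0x00 */
            uint64_t pfs;                   /* rse_pfs_offset      == 0x08 */
            uint64_t ifs;                   /* rse_ifs_offset      == 0x10 */
            uint64_t bspstore;              /* rse_bspstore_offset == 0x18 */
            uint64_t rnat;                  /* rse_rnat_offset     == 0x20 */
            uint64_t ndirty;                /* rse_ndirty_offset   == 0x28 */
    };
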
-
-/*
- * rse_switch_context
- *
- * 1. Save old RSC onto the new stack frame
- * 2. Save PFS onto new stack frame
- * 3. Cover the old frame and start a new frame.
- * 4. Save IFS onto new stack frame
- * 5. Save the old BSPSTORE on the new stack frame
- * 6. Save the old RNAT on the new stack frame
- * 7. Write BSPSTORE with the new backing store pointer
- * 8. Read and save the new BSP to calculate the #dirty registers
- * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
- */
-#define rse_switch_context(temp,p_stackframe,p_bspstore) \
- ;; \
- mov temp=ar.rsc;; \
- st8 [p_stackframe]=temp,8;; \
- mov temp=ar.pfs;; \
- st8 [p_stackframe]=temp,8; \
- cover ;; \
- mov temp=cr.ifs;; \
- st8 [p_stackframe]=temp,8;; \
- mov temp=ar.bspstore;; \
- st8 [p_stackframe]=temp,8;; \
- mov temp=ar.rnat;; \
- st8 [p_stackframe]=temp,8; \
- mov ar.bspstore=p_bspstore;; \
- mov temp=ar.bsp;; \
- sub temp=temp,p_bspstore;; \
- st8 [p_stackframe]=temp,8;;
-
-/*
- * rse_return_context
- * 1. Allocate a zero-sized frame
- * 2. Store the number of dirty registers in the RSC.loadrs field
- * 3. Issue a loadrs to ensure that any registers from the interrupted
- * context which were saved on the new stack frame have been loaded
- * back into the stacked registers
- * 4. Restore BSPSTORE
- * 5. Restore RNAT
- * 6. Restore PFS
- * 7. Restore IFS
- * 8. Restore RSC
- * 9. Issue an RFI
- */
-#define rse_return_context(psr_mask_reg,temp,p_stackframe) \
- ;; \
- alloc temp=ar.pfs,0,0,0,0; \
- add p_stackframe=rse_ndirty_offset,p_stackframe;; \
- ld8 temp=[p_stackframe];; \
- shl temp=temp,16;; \
- mov ar.rsc=temp;; \
- loadrs;; \
- add p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
- ld8 temp=[p_stackframe];; \
- mov ar.bspstore=temp;; \
- add p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
- ld8 temp=[p_stackframe];; \
- mov ar.rnat=temp;; \
- add p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;; \
- ld8 temp=[p_stackframe];; \
- mov ar.pfs=temp;; \
- add p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;; \
- ld8 temp=[p_stackframe];; \
- mov cr.ifs=temp;; \
- add p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;; \
- ld8 temp=[p_stackframe];; \
- mov ar.rsc=temp ; \
- mov temp=psr;; \
- or temp=temp,psr_mask_reg;; \
- mov cr.ipsr=temp;; \
- mov temp=ip;; \
- add temp=0x30,temp;; \
- mov cr.iip=temp;; \
- srlz.i;; \
- rfi;;
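
The shl by 16 places the saved NDIRTY byte count (BSP - BSPSTORE) into the loadrs field of ar.rsc, which occupies bits 29:16; as a C sketch, that step is simply:

    /* Build an RSC value whose loadrs field holds the dirty byte count;
     * all other RSC fields are left zero while loadrs runs. */
    static inline uint64_t rsc_for_loadrs(uint64_t ndirty_bytes)
    {
            return ndirty_bytes << 16;
    }

Similarly, the add of 0x30 to ip makes the rfi resume three 16-byte bundles past the bundle that read ip, intended to land just after this macro.
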
-
-#endif /* _ASM_IA64_MCA_ASM_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/meminit.h b/xen/include/asm-ia64/linux-xen/asm/meminit.h
deleted file mode 100644
index 1a4c0a9d6a..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/meminit.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef meminit_h
-#define meminit_h
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/config.h>
-
-/*
- * Entries defined so far:
- * - boot param structure itself
- * - memory map
-#ifndef XEN
- * - initrd (optional)
-#endif
- * - command line string
- * - kernel code & data
-#ifdef XEN
- * - dom0 code & data
- * - initrd (optional)
-#endif
- * - Kernel memory map built from EFI memory map
- * - Crash kernel for kdump
- *
- * More could be added if necessary
- */
-#ifndef XEN
-#define IA64_MAX_RSVD_REGIONS 7
-#else
-#define IA64_MAX_RSVD_REGIONS 8
-#endif
-
-struct rsvd_region {
- unsigned long start; /* virtual address of beginning of element */
- unsigned long end; /* virtual address of end of element + 1 */
-};
-
-extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
-extern int num_rsvd_regions;
-
-extern void find_memory (void);
-extern void reserve_memory (void);
-extern void find_initrd (void);
-extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
-extern void efi_memmap_init(unsigned long *, unsigned long *);
-
-/*
- * For rounding an address to the next IA64_GRANULE_SIZE or MAX_ORDER boundary
- */
-#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
-#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
-#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
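
A quick numeric illustration of the granule macros, assuming the usual 16 MB granule (IA64_GRANULE_SIZE == 1UL << 24):

    /* GRANULEROUNDDOWN(0x1234567) == 0x1000000                  */
    /* GRANULEROUNDUP(0x1234567)   == 0x2000000                  */
    /* an already-aligned address is unchanged:                  */
    /* GRANULEROUNDUP(0x2000000)   == 0x2000000                  */
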
-
-#ifdef CONFIG_DISCONTIGMEM
- extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
-#else
-# define call_pernode_memory(start, len, func) (*func)(start, len, 0)
-#endif
-
-#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
- extern unsigned long vmalloc_end;
- extern struct page *vmem_map;
- extern int find_largest_hole (u64 start, u64 end, void *arg);
- extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
-#endif
-
-#endif /* meminit_h */
diff --git a/xen/include/asm-ia64/linux-xen/asm/numa.h b/xen/include/asm-ia64/linux-xen/asm/numa.h
deleted file mode 100644
index ee4d7e8c7b..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/numa.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * This file contains NUMA specific prototypes and definitions.
- *
- * 2002/08/05 Erich Focht <efocht@ess.nec.de>
- *
- */
-#ifndef _ASM_IA64_NUMA_H
-#define _ASM_IA64_NUMA_H
-
-#include <linux/config.h>
-
-#ifdef CONFIG_NUMA
-
-#include <linux/cache.h>
-#include <linux/cpumask.h>
-#include <linux/numa.h>
-#ifndef XEN /* dependency loop when this is included */
-#include <linux/smp.h>
-#endif
-#include <linux/threads.h>
-
-#include <asm/mmzone.h>
-
-extern int srat_rev;
-
-extern u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
-#ifndef XEN
-extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
-#else
-extern cpumask_t node_to_cpu_mask[] __cacheline_aligned;
-#endif
-
-/* Stuff below this line could be architecture independent */
-
-extern int num_node_memblks; /* total number of memory chunks */
-
-/*
- * List of node memory chunks. Filled when parsing the SRAT table to
- * obtain information about memory nodes.
- */
-
-struct node_memblk_s {
- unsigned long start_paddr;
- unsigned long size;
- int nid; /* which logical node contains this chunk? */
- int bank; /* which mem bank on this node */
-};
-
-struct node_cpuid_s {
- u16 phys_id; /* id << 8 | eid */
- int nid; /* logical node containing this CPU */
-};
-
-#ifndef XEN
-extern struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
-#else
-extern struct node_memblk_s node_memblk[];
-#endif
-extern struct node_cpuid_s node_cpuid[NR_CPUS];
-
-/*
- * ACPI 2.0 SLIT (System Locality Information Table)
- * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
- *
- * This is a matrix with "distances" between nodes; the values should
- * be proportional to the memory access latency ratios.
- */
-
-#ifndef XEN
-extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
-#else
-extern u8 numa_slit[];
-#endif
-#define node_distance(from,to) (numa_slit[(from) * num_online_nodes() + (to)])
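
For illustration, the SLIT matrix is flattened row-major, so with four online nodes the distance from node 1 to node 2 is:

    int d = node_distance(1, 2);    /* == numa_slit[1 * 4 + 2] */
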
-
-extern int paddr_to_nid(unsigned long paddr);
-
-#define local_nodeid (cpu_to_node_map[smp_processor_id()])
-
-#else /* !CONFIG_NUMA */
-
-#define paddr_to_nid(addr) 0
-
-#endif /* CONFIG_NUMA */
-
-#ifdef XEN
-#define phys_to_nid(paddr) paddr_to_nid(paddr)
-extern int pxm_to_node(int pxm);
-extern int node_to_pxm(int node);
-extern void __acpi_map_pxm_to_node(int, int);
-extern int acpi_map_pxm_to_node(int);
-#endif
-
-#endif /* _ASM_IA64_NUMA_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/page.h b/xen/include/asm-ia64/linux-xen/asm/page.h
deleted file mode 100644
index 14a7216ad2..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/page.h
+++ /dev/null
@@ -1,227 +0,0 @@
-#ifndef _ASM_IA64_PAGE_H
-#define _ASM_IA64_PAGE_H
-/*
- * Pagetable related stuff.
- *
- * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/config.h>
-
-#include <asm/intrinsics.h>
-#include <asm/types.h>
-
-#ifdef XEN /* This will go away with newer upstream */
-#define RGN_SHIFT 61
-#define RGN_BASE(r) (r << RGN_SHIFT)
-#define RGN_BITS RGN_BASE(-1)
-#define RGN_HPAGE REGION_HPAGE
-#ifndef CONFIG_HUGETLB_PAGE
-# define REGION_HPAGE (4UL)
-#endif
-#endif
-
-/*
- * PAGE_SHIFT determines the actual kernel page size.
- */
-#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
-# define PAGE_SHIFT 12
-#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
-# define PAGE_SHIFT 13
-#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
-# define PAGE_SHIFT 14
-#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
-# define PAGE_SHIFT 16
-#else
-# error Unsupported page size!
-#endif
-
-#define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
-#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
-
-#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */
-#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
-
-#define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */
-
-#ifdef CONFIG_HUGETLB_PAGE
-# define REGION_HPAGE (4UL) /* note: this is hardcoded in reload_context()!*/
-# define REGION_SHIFT 61
-# define HPAGE_REGION_BASE (REGION_HPAGE << REGION_SHIFT)
-# define HPAGE_SHIFT hpage_shift
-# define HPAGE_SHIFT_DEFAULT 28 /* check ia64 SDM for architecture supported size */
-# define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
-# define HPAGE_MASK (~(HPAGE_SIZE - 1))
-
-# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#endif /* CONFIG_HUGETLB_PAGE */
-
-#ifdef __ASSEMBLY__
-# define __pa(x) ((x) - PAGE_OFFSET)
-# define __va(x) ((x) + PAGE_OFFSET)
-#else /* !__ASSEMBLY */
-# ifdef __KERNEL__
-# define STRICT_MM_TYPECHECKS
-
-extern void clear_page (void *page);
-extern void copy_page (void *to, void *from);
-
-/*
- * clear_user_page() and copy_user_page() can't be inline functions because
- * flush_dcache_page() can't be defined until later...
- */
-#define clear_user_page(addr, vaddr, page) \
-do { \
- clear_page(addr); \
- flush_dcache_page(page); \
-} while (0)
-
-#define copy_user_page(to, from, vaddr, page) \
-do { \
- copy_page((to), (from)); \
- flush_dcache_page(page); \
-} while (0)
-
-#ifndef XEN
-#define alloc_zeroed_user_highpage(vma, vaddr) \
-({ \
- struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \
- if (page) \
- flush_dcache_page(page); \
- page; \
-})
-#endif
-
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-
-#define virt_addr_valid(kaddr) mfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-
-#ifndef XEN
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-extern int ia64_mfn_valid (unsigned long pfn);
-#else
-# define ia64_mfn_valid(pfn) 1
-#endif
-
-#ifndef CONFIG_DISCONTIGMEM
-# define mfn_valid(pfn) (((pfn) < max_mapnr) && ia64_mfn_valid(pfn))
-# define page_to_mfn(page) ((unsigned long) (page - mem_map))
-# define mfn_to_page(pfn) (mem_map + (pfn))
-#else
-extern struct page *vmem_map;
-extern unsigned long max_low_pfn;
-# define mfn_valid(pfn) (((pfn) < max_low_pfn) && ia64_mfn_valid(pfn))
-# define page_to_mfn(page) ((unsigned long) (page - vmem_map))
-# define mfn_to_page(pfn) (vmem_map + (pfn))
-#endif
-
-#define page_to_maddr(page) (page_to_mfn(page) << PAGE_SHIFT)
-#define virt_to_page(kaddr) mfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#endif
-
-typedef union ia64_va {
- struct {
- unsigned long off : 61; /* intra-region offset */
- unsigned long reg : 3; /* region number */
- } f;
- unsigned long l;
- void *p;
-} ia64_va;
-
-#ifndef XEN
-/*
- * Note: These macros depend on the fact that PAGE_OFFSET has all
- * region bits set to 1 and all other bits set to zero. They are
- * expressed in this way to ensure they result in a single "dep"
- * instruction.
- */
-#define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
-#define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
-#endif /* XEN */
-
-#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
-#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
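
For example, for a region-7 (kernel) address:

    /* REGION_NUMBER(0xe000000000001000UL) == 7      */
    /* REGION_OFFSET(0xe000000000001000UL) == 0x1000 */
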
-
-#define REGION_SIZE REGION_NUMBER(1)
-#define REGION_KERNEL 7
-
-#ifdef CONFIG_HUGETLB_PAGE
-# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
- | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
-# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-# define is_hugepage_only_range(mm, addr, len) \
- (REGION_NUMBER(addr) == REGION_HPAGE && \
- REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
-extern unsigned int hpage_shift;
-#endif
-
-static __inline__ int
-get_order (unsigned long size)
-{
- long double d = size - 1;
- long order;
-
- order = ia64_getf_exp(d);
- order = order - PAGE_SHIFT - 0xffff + 1;
- if (order < 0)
- order = 0;
- return order;
-}
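
get_order() abuses the floating-point unit: ia64_getf_exp() extracts the biased exponent of size - 1 (0xffff being the exponent bias of the ia64 register format), which amounts to floor(log2(size - 1)). A portable sketch of the same computation, for illustration only:

    static inline int get_order_portable(unsigned long size)
    {
            int order = 0;

            /* order = floor(log2(size - 1)) - PAGE_SHIFT + 1, clamped at 0 */
            size = (size - 1) >> PAGE_SHIFT;
            while (size) {
                    order++;
                    size >>= 1;
            }
            return order;
    }
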
-
-# endif /* __KERNEL__ */
-#endif /* !__ASSEMBLY__ */
-
-#ifdef STRICT_MM_TYPECHECKS
- /*
- * These are used to make use of C type-checking..
- */
- typedef struct { unsigned long pte; } pte_t;
- typedef struct { unsigned long pmd; } pmd_t;
- typedef struct { unsigned long pgd; } pgd_t;
- typedef struct { unsigned long pgprot; } pgprot_t;
-
-# define pte_val(x) ((x).pte)
-# define pmd_val(x) ((x).pmd)
-# define pgd_val(x) ((x).pgd)
-# define pgprot_val(x) ((x).pgprot)
-
-# define __pte(x) ((pte_t) { (x) } )
-# define __pgprot(x) ((pgprot_t) { (x) } )
-
-#else /* !STRICT_MM_TYPECHECKS */
- /*
- * .. while these make it easier on the compiler
- */
-# ifndef __ASSEMBLY__
- typedef unsigned long pte_t;
- typedef unsigned long pmd_t;
- typedef unsigned long pgd_t;
- typedef unsigned long pgprot_t;
-# endif
-
-# define pte_val(x) (x)
-# define pmd_val(x) (x)
-# define pgd_val(x) (x)
-# define pgprot_val(x) (x)
-
-# define __pte(x) (x)
-# define __pgd(x) (x)
-# define __pgprot(x) (x)
-#endif /* !STRICT_MM_TYPECHECKS */
-
-#ifndef XEN
-#define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000)
-
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
- (((current->personality & READ_IMPLIES_EXEC) != 0) \
- ? VM_EXEC : 0))
-
-#else
-#include <asm/xenpage.h>
-#endif
-
-#endif /* _ASM_IA64_PAGE_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/pal.h b/xen/include/asm-ia64/linux-xen/asm/pal.h
deleted file mode 100644
index c6340ed591..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/pal.h
+++ /dev/null
@@ -1,1760 +0,0 @@
-#ifndef _ASM_IA64_PAL_H
-#define _ASM_IA64_PAL_H
-
-/*
- * Processor Abstraction Layer definitions.
- *
- * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0
- * chapter 11 IA-64 Processor Abstraction Layer
- *
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
- *
- * 99/10/01 davidm Make sure we pass zero for reserved parameters.
- * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6.
- * 00/03/23 cfleck Modified processor min-state save area to match updated PAL & SAL info
- * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added
- * 00/05/25 eranian Support for stack calls, and static physical calls
- * 00/06/18 eranian Support for stacked physical calls
- * 06/10/26 rja Support for Intel Itanium Architecture Software Developer's
- * Manual Rev 2.2 (Jan 2006)
- */
-
-/*
- * Note that some of these calls use a static-register only calling
- * convention which has nothing to do with the regular calling
- * convention.
- */
-#define PAL_CACHE_FLUSH 1 /* flush i/d cache */
-#define PAL_CACHE_INFO 2 /* get detailed i/d cache info */
-#define PAL_CACHE_INIT 3 /* initialize i/d cache */
-#define PAL_CACHE_SUMMARY 4 /* get summary of cache hierarchy */
-#define PAL_MEM_ATTRIB 5 /* list supported memory attributes */
-#define PAL_PTCE_INFO 6 /* purge TLB info */
-#define PAL_VM_INFO 7 /* return supported virtual memory features */
-#define PAL_VM_SUMMARY 8 /* return summary on supported vm features */
-#define PAL_BUS_GET_FEATURES 9 /* return processor bus interface features settings */
-#define PAL_BUS_SET_FEATURES 10 /* set processor bus features */
-#define PAL_DEBUG_INFO 11 /* get number of debug registers */
-#define PAL_FIXED_ADDR		12	/* get fixed component of processor's directed address */
-#define PAL_FREQ_BASE 13 /* base frequency of the platform */
-#define PAL_FREQ_RATIOS 14 /* ratio of processor, bus and ITC frequency */
-#define PAL_PERF_MON_INFO 15 /* return performance monitor info */
-#define PAL_PLATFORM_ADDR 16 /* set processor interrupt block and IO port space addr */
-#define PAL_PROC_GET_FEATURES 17 /* get configurable processor features & settings */
-#define PAL_PROC_SET_FEATURES 18 /* enable/disable configurable processor features */
-#define PAL_RSE_INFO 19 /* return rse information */
-#define PAL_VERSION 20 /* return version of PAL code */
-#define PAL_MC_CLEAR_LOG 21 /* clear all processor log info */
-#define PAL_MC_DRAIN 22 /* drain operations which could result in an MCA */
-#define PAL_MC_EXPECTED 23 /* set/reset expected MCA indicator */
-#define PAL_MC_DYNAMIC_STATE 24 /* get processor dynamic state */
-#define PAL_MC_ERROR_INFO 25 /* get processor MCA info and static state */
-#define PAL_MC_RESUME 26 /* Return to interrupted process */
-#define PAL_MC_REGISTER_MEM 27 /* Register memory for PAL to use during MCAs and inits */
-#define PAL_HALT 28 /* enter the low power HALT state */
-#define PAL_HALT_LIGHT 29 /* enter the low power light halt state*/
-#define PAL_COPY_INFO 30 /* returns info needed to relocate PAL */
-#define PAL_CACHE_LINE_INIT 31 /* init tags & data of cache line */
-#define PAL_PMI_ENTRYPOINT 32 /* register PMI memory entry points with the processor */
-#define PAL_ENTER_IA_32_ENV 33 /* enter IA-32 system environment */
-#define PAL_VM_PAGE_SIZE 34 /* return vm TC and page walker page sizes */
-
-#define PAL_MEM_FOR_TEST 37 /* get amount of memory needed for late processor test */
-#define PAL_CACHE_PROT_INFO 38 /* get i/d cache protection info */
-#define PAL_REGISTER_INFO 39 /* return AR and CR register information*/
-#define PAL_SHUTDOWN 40 /* enter processor shutdown state */
-#define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */
-#define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */
-#define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */
-#define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */
-#define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */
-
-#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
-#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
-#define PAL_TEST_PROC 258 /* perform late processor self-test */
-#define PAL_CACHE_READ 259 /* read tag & data of cacheline for diagnostic testing */
-#define PAL_CACHE_WRITE 260 /* write tag & data of cacheline for diagnostic testing */
-#define PAL_VM_TR_READ 261 /* read contents of translation register */
-#define PAL_GET_PSTATE 262 /* get the current P-state */
-#define PAL_SET_PSTATE 263 /* set the P-state */
-#define PAL_BRAND_INFO 274 /* Processor branding information */
-
-#define PAL_GET_PSTATE_TYPE_LASTSET 0
-#define PAL_GET_PSTATE_TYPE_AVGANDRESET 1
-#define PAL_GET_PSTATE_TYPE_AVGNORESET 2
-#define PAL_GET_PSTATE_TYPE_INSTANT 3
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <asm/fpu.h>
-#ifdef XEN
-#include <linux/efi.h>
-#endif
-
-/*
- * Data types needed to pass information into PAL procedures and
- * interpret information returned by them.
- */
-
-/* Return status from the PAL procedure */
-typedef s64 pal_status_t;
-
-#define PAL_STATUS_SUCCESS 0 /* No error */
-#define PAL_STATUS_UNIMPLEMENTED (-1) /* Unimplemented procedure */
-#define PAL_STATUS_EINVAL (-2) /* Invalid argument */
-#define PAL_STATUS_ERROR (-3) /* Error */
-#define PAL_STATUS_CACHE_INIT_FAIL (-4) /* Could not initialize the
- * specified level and type of
-						 * cache without side effects
- * and "restrict" was 1
- */
-#define PAL_STATUS_REQUIRES_MEMORY (-9) /* Call requires PAL memory buffer */
-
-/* Processor cache level in the hierarchy */
-typedef u64 pal_cache_level_t;
-#define PAL_CACHE_LEVEL_L0 0 /* L0 */
-#define PAL_CACHE_LEVEL_L1 1 /* L1 */
-#define PAL_CACHE_LEVEL_L2 2 /* L2 */
-
-
-/* Processor cache type at a particular level in the hierarchy */
-
-typedef u64 pal_cache_type_t;
-#define PAL_CACHE_TYPE_INSTRUCTION 1 /* Instruction cache */
-#define PAL_CACHE_TYPE_DATA 2 /* Data or unified cache */
-#define PAL_CACHE_TYPE_INSTRUCTION_DATA 3 /* Both Data & Instruction */
-#ifdef XEN
-#define PAL_CACHE_TYPE_COHERENT 4 /* Make I&D-cache coherent */
-#endif
-
-
-#define PAL_CACHE_FLUSH_INVALIDATE 1 /* Invalidate clean lines */
-#define PAL_CACHE_FLUSH_CHK_INTRS 2 /* check for interrupts/mc while flushing */
-
-/* Processor cache line size in bytes */
-typedef int pal_cache_line_size_t;
-
-/* Processor cache line state */
-typedef u64 pal_cache_line_state_t;
-#define PAL_CACHE_LINE_STATE_INVALID 0 /* Invalid */
-#define PAL_CACHE_LINE_STATE_SHARED 1 /* Shared */
-#define PAL_CACHE_LINE_STATE_EXCLUSIVE 2 /* Exclusive */
-#define PAL_CACHE_LINE_STATE_MODIFIED 3 /* Modified */
-
-typedef struct pal_freq_ratio {
-	u32 den, num;		/* denominator & numerator */
-} itc_ratio, proc_ratio;
-
-typedef union pal_cache_config_info_1_s {
- struct {
- u64 u : 1, /* 0 Unified cache ? */
- at : 2, /* 2-1 Cache mem attr*/
- reserved : 5, /* 7-3 Reserved */
- associativity : 8, /* 16-8 Associativity*/
- line_size : 8, /* 23-17 Line size */
- stride : 8, /* 31-24 Stride */
- store_latency : 8, /*39-32 Store latency*/
- load_latency : 8, /* 47-40 Load latency*/
- store_hints : 8, /* 55-48 Store hints*/
- load_hints : 8; /* 63-56 Load hints */
- } pcci1_bits;
- u64 pcci1_data;
-} pal_cache_config_info_1_t;
-
-typedef union pal_cache_config_info_2_s {
- struct {
- u32 cache_size; /*cache size in bytes*/
-
-
- u32 alias_boundary : 8, /* 39-32 aliased addr
- * separation for max
- * performance.
- */
- tag_ls_bit : 8, /* 47-40 LSb of addr*/
- tag_ms_bit : 8, /* 55-48 MSb of addr*/
- reserved : 8; /* 63-56 Reserved */
- } pcci2_bits;
- u64 pcci2_data;
-} pal_cache_config_info_2_t;
-
-
-typedef struct pal_cache_config_info_s {
- pal_status_t pcci_status;
- pal_cache_config_info_1_t pcci_info_1;
- pal_cache_config_info_2_t pcci_info_2;
- u64 pcci_reserved;
-} pal_cache_config_info_t;
-
-#define pcci_ld_hints pcci_info_1.pcci1_bits.load_hints
-#define pcci_st_hints pcci_info_1.pcci1_bits.store_hints
-#define pcci_ld_latency pcci_info_1.pcci1_bits.load_latency
-#define pcci_st_latency pcci_info_1.pcci1_bits.store_latency
-#define pcci_stride pcci_info_1.pcci1_bits.stride
-#define pcci_line_size pcci_info_1.pcci1_bits.line_size
-#define pcci_assoc pcci_info_1.pcci1_bits.associativity
-#define pcci_cache_attr pcci_info_1.pcci1_bits.at
-#define pcci_unified pcci_info_1.pcci1_bits.u
-#define pcci_tag_msb pcci_info_2.pcci2_bits.tag_ms_bit
-#define pcci_tag_lsb pcci_info_2.pcci2_bits.tag_ls_bit
-#define pcci_alias_boundary pcci_info_2.pcci2_bits.alias_boundary
-#define pcci_cache_size pcci_info_2.pcci2_bits.cache_size
-
-
-
-/* Possible values for cache attributes */
-
-#define PAL_CACHE_ATTR_WT 0 /* Write through cache */
-#define PAL_CACHE_ATTR_WB 1 /* Write back cache */
-#define PAL_CACHE_ATTR_WT_OR_WB 2 /* Either write thru or write
- * back depending on TLB
- * memory attributes
- */
-
-
-/* Possible values for cache hints */
-
-#define PAL_CACHE_HINT_TEMP_1 0 /* Temporal level 1 */
-#define PAL_CACHE_HINT_NTEMP_1 1 /* Non-temporal level 1 */
-#define PAL_CACHE_HINT_NTEMP_ALL 3 /* Non-temporal all levels */
-
-/* Processor cache protection information */
-typedef union pal_cache_protection_element_u {
- u32 pcpi_data;
- struct {
- u32 data_bits : 8, /* # data bits covered by
- * each unit of protection
- */
-
- tagprot_lsb : 6, /* Least -do- */
- tagprot_msb : 6, /* Most Sig. tag address
- * bit that this
- * protection covers.
- */
- prot_bits : 6, /* # of protection bits */
- method : 4, /* Protection method */
-			t_d		: 2; /* Indicates which part
-					      * of the cache this
-					      * protection encoding
-					      * applies to.
-					      */
- } pcp_info;
-} pal_cache_protection_element_t;
-
-#define pcpi_cache_prot_part pcp_info.t_d
-#define pcpi_prot_method pcp_info.method
-#define pcpi_prot_bits pcp_info.prot_bits
-#define pcpi_tagprot_msb pcp_info.tagprot_msb
-#define pcpi_tagprot_lsb pcp_info.tagprot_lsb
-#define pcpi_data_bits pcp_info.data_bits
-
-/* Processor cache part encodings */
-#define PAL_CACHE_PROT_PART_DATA 0 /* Data protection */
-#define PAL_CACHE_PROT_PART_TAG 1 /* Tag protection */
-#define PAL_CACHE_PROT_PART_TAG_DATA 2 /* Tag+data protection (tag is
- * more significant )
- */
-#define PAL_CACHE_PROT_PART_DATA_TAG 3 /* Data+tag protection (data is
- * more significant )
- */
-#define PAL_CACHE_PROT_PART_MAX 6
-
-
-typedef struct pal_cache_protection_info_s {
- pal_status_t pcpi_status;
- pal_cache_protection_element_t pcp_info[PAL_CACHE_PROT_PART_MAX];
-} pal_cache_protection_info_t;
-
-
-/* Processor cache protection method encodings */
-#define PAL_CACHE_PROT_METHOD_NONE 0 /* No protection */
-#define PAL_CACHE_PROT_METHOD_ODD_PARITY 1 /* Odd parity */
-#define PAL_CACHE_PROT_METHOD_EVEN_PARITY 2 /* Even parity */
-#define PAL_CACHE_PROT_METHOD_ECC 3 /* ECC protection */
-
-
-/* Processor cache line identification in the hierarchy */
-typedef union pal_cache_line_id_u {
- u64 pclid_data;
- struct {
- u64 cache_type : 8, /* 7-0 cache type */
- level : 8, /* 15-8 level of the
- * cache in the
- * hierarchy.
- */
- way : 8, /* 23-16 way in the set
- */
- part : 8, /* 31-24 part of the
- * cache
- */
- reserved : 32; /* 63-32 is reserved*/
- } pclid_info_read;
- struct {
- u64 cache_type : 8, /* 7-0 cache type */
- level : 8, /* 15-8 level of the
- * cache in the
- * hierarchy.
- */
- way : 8, /* 23-16 way in the set
- */
- part : 8, /* 31-24 part of the
- * cache
- */
- mesi : 8, /* 39-32 cache line
- * state
- */
- start : 8, /* 47-40 lsb of data to
- * invert
- */
- length : 8, /* 55-48 #bits to
- * invert
- */
- trigger : 8; /* 63-56 Trigger error
- * by doing a load
- * after the write
- */
-
- } pclid_info_write;
-} pal_cache_line_id_u_t;
-
-#define pclid_read_part pclid_info_read.part
-#define pclid_read_way pclid_info_read.way
-#define pclid_read_level pclid_info_read.level
-#define pclid_read_cache_type pclid_info_read.cache_type
-
-#define pclid_write_trigger pclid_info_write.trigger
-#define pclid_write_length pclid_info_write.length
-#define pclid_write_start pclid_info_write.start
-#define pclid_write_mesi pclid_info_write.mesi
-#define pclid_write_part pclid_info_write.part
-#define pclid_write_way pclid_info_write.way
-#define pclid_write_level pclid_info_write.level
-#define pclid_write_cache_type pclid_info_write.cache_type
-
-/* Processor cache line part encodings */
-#define PAL_CACHE_LINE_ID_PART_DATA 0 /* Data */
-#define PAL_CACHE_LINE_ID_PART_TAG 1 /* Tag */
-#define PAL_CACHE_LINE_ID_PART_DATA_PROT 2 /* Data protection */
-#define PAL_CACHE_LINE_ID_PART_TAG_PROT 3 /* Tag protection */
-#define PAL_CACHE_LINE_ID_PART_DATA_TAG_PROT 4 /* Data+tag
- * protection
- */
-typedef struct pal_cache_line_info_s {
- pal_status_t pcli_status; /* Return status of the read cache line
- * info call.
- */
- u64 pcli_data; /* 64-bit data, tag, protection bits .. */
- u64 pcli_data_len; /* data length in bits */
- pal_cache_line_state_t pcli_cache_line_state; /* mesi state */
-
-} pal_cache_line_info_t;
-
-
-/* Machine check related definitions */
-
-/* Pending event status bits */
-typedef u64 pal_mc_pending_events_t;
-
-#define PAL_MC_PENDING_MCA (1 << 0)
-#define PAL_MC_PENDING_INIT (1 << 1)
-
-/* Error information type */
-typedef u64 pal_mc_info_index_t;
-
-#define PAL_MC_INFO_PROCESSOR 0 /* Processor */
-#define PAL_MC_INFO_CACHE_CHECK 1 /* Cache check */
-#define PAL_MC_INFO_TLB_CHECK 2 /* Tlb check */
-#define PAL_MC_INFO_BUS_CHECK 3 /* Bus check */
-#define PAL_MC_INFO_REQ_ADDR 4 /* Requestor address */
-#define PAL_MC_INFO_RESP_ADDR 5 /* Responder address */
-#define PAL_MC_INFO_TARGET_ADDR 6 /* Target address */
-#define PAL_MC_INFO_IMPL_DEP 7 /* Implementation
- * dependent
- */
-
-#define PAL_TLB_CHECK_OP_PURGE 8
-
-typedef struct pal_process_state_info_s {
- u64 reserved1 : 2,
- rz : 1, /* PAL_CHECK processor
- * rendezvous
- * successful.
- */
-
- ra : 1, /* PAL_CHECK attempted
- * a rendezvous.
- */
- me : 1, /* Distinct multiple
- * errors occurred
- */
-
- mn : 1, /* Min. state save
- * area has been
- * registered with PAL
- */
-
- sy : 1, /* Storage integrity
- * synched
- */
-
-
- co : 1, /* Continuable */
- ci : 1, /* MC isolated */
- us : 1, /* Uncontained storage
- * damage.
- */
-
-
- hd : 1, /* Non-essential hw
- * lost (no loss of
- * functionality)
- * causing the
- * processor to run in
- * degraded mode.
- */
-
- tl : 1, /* 1 => MC occurred
- * after an instr was
- * executed but before
- * the trap that
- * resulted from instr
- * execution was
- * generated.
- * (Trap Lost )
- */
- mi : 1, /* More information available
- * call PAL_MC_ERROR_INFO
- */
- pi : 1, /* Precise instruction pointer */
- pm : 1, /* Precise min-state save area */
-
- dy : 1, /* Processor dynamic
- * state valid
- */
-
-
- in : 1, /* 0 = MC, 1 = INIT */
- rs : 1, /* RSE valid */
- cm : 1, /* MC corrected */
- ex : 1, /* MC is expected */
- cr : 1, /* Control regs valid*/
- pc : 1, /* Perf cntrs valid */
- dr : 1, /* Debug regs valid */
- tr : 1, /* Translation regs
- * valid
- */
- rr : 1, /* Region regs valid */
- ar : 1, /* App regs valid */
- br : 1, /* Branch regs valid */
- pr : 1, /* Predicate registers
- * valid
- */
-
- fp : 1, /* fp registers valid*/
- b1 : 1, /* Preserved bank one
- * general registers
- * are valid
- */
- b0 : 1, /* Preserved bank zero
- * general registers
- * are valid
- */
- gr : 1, /* General registers
- * are valid
- * (excl. banked regs)
- */
- dsize : 16, /* size of dynamic
- * state returned
- * by the processor
- */
-
- se : 1, /* Shared error. MCA in a
- shared structure */
- reserved2 : 10,
- cc : 1, /* Cache check */
- tc : 1, /* TLB check */
- bc : 1, /* Bus check */
- rc : 1, /* Register file check */
- uc : 1; /* Uarch check */
-
-} pal_processor_state_info_t;
-
-typedef struct pal_cache_check_info_s {
- u64 op : 4, /* Type of cache
- * operation that
- * caused the machine
- * check.
- */
- level : 2, /* Cache level */
- reserved1 : 2,
- dl : 1, /* Failure in data part
- * of cache line
- */
- tl : 1, /* Failure in tag part
- * of cache line
- */
- dc : 1, /* Failure in dcache */
- ic : 1, /* Failure in icache */
- mesi : 3, /* Cache line state */
- mv : 1, /* mesi valid */
- way : 5, /* Way in which the
- * error occurred
- */
- wiv : 1, /* Way field valid */
- reserved2 : 1,
- dp : 1, /* Data poisoned on MBE */
- reserved3 : 8,
-
- index : 20, /* Cache line index */
- reserved4 : 2,
-
- is : 1, /* instruction set (1 == ia32) */
- iv : 1, /* instruction set field valid */
- pl : 2, /* privilege level */
- pv : 1, /* privilege level field valid */
- mcc : 1, /* Machine check corrected */
- tv : 1, /* Target address
- * structure is valid
- */
- rq : 1, /* Requester identifier
- * structure is valid
- */
- rp : 1, /* Responder identifier
- * structure is valid
- */
- pi : 1; /* Precise instruction pointer
- * structure is valid
- */
-} pal_cache_check_info_t;
-
-typedef struct pal_tlb_check_info_s {
-
- u64 tr_slot : 8, /* Slot# of TR where
- * error occurred
- */
- trv : 1, /* tr_slot field is valid */
- reserved1 : 1,
- level : 2, /* TLB level where failure occurred */
- reserved2 : 4,
- dtr : 1, /* Fail in data TR */
- itr : 1, /* Fail in inst TR */
- dtc : 1, /* Fail in data TC */
- itc : 1, /* Fail in inst. TC */
- op : 4, /* Cache operation */
- reserved3 : 30,
-
- is : 1, /* instruction set (1 == ia32) */
- iv : 1, /* instruction set field valid */
- pl : 2, /* privilege level */
- pv : 1, /* privilege level field valid */
- mcc : 1, /* Machine check corrected */
- tv : 1, /* Target address
- * structure is valid
- */
- rq : 1, /* Requester identifier
- * structure is valid
- */
- rp : 1, /* Responder identifier
- * structure is valid
- */
- pi : 1; /* Precise instruction pointer
- * structure is valid
- */
-} pal_tlb_check_info_t;
-
-typedef struct pal_bus_check_info_s {
- u64 size : 5, /* Xaction size */
- ib : 1, /* Internal bus error */
- eb : 1, /* External bus error */
- cc : 1, /* Error occurred
- * during cache-cache
- * transfer.
- */
- type : 8, /* Bus xaction type*/
- sev : 5, /* Bus error severity*/
- hier : 2, /* Bus hierarchy level */
- dp : 1, /* Data poisoned on MBE */
- bsi : 8, /* Bus error status
- * info
- */
- reserved2 : 22,
-
- is : 1, /* instruction set (1 == ia32) */
- iv : 1, /* instruction set field valid */
- pl : 2, /* privilege level */
- pv : 1, /* privilege level field valid */
- mcc : 1, /* Machine check corrected */
- tv : 1, /* Target address
- * structure is valid
- */
- rq : 1, /* Requester identifier
- * structure is valid
- */
- rp : 1, /* Responder identifier
- * structure is valid
- */
- pi : 1; /* Precise instruction pointer
- * structure is valid
- */
-} pal_bus_check_info_t;
-
-typedef struct pal_reg_file_check_info_s {
- u64 id : 4, /* Register file identifier */
- op : 4, /* Type of register
- * operation that
- * caused the machine
- * check.
- */
- reg_num : 7, /* Register number */
- rnv : 1, /* reg_num valid */
- reserved2 : 38,
-
- is : 1, /* instruction set (1 == ia32) */
- iv : 1, /* instruction set field valid */
- pl : 2, /* privilege level */
- pv : 1, /* privilege level field valid */
- mcc : 1, /* Machine check corrected */
- reserved3 : 3,
- pi : 1; /* Precise instruction pointer
- * structure is valid
- */
-} pal_reg_file_check_info_t;
-
-typedef struct pal_uarch_check_info_s {
- u64 sid : 5, /* Structure identification */
- level : 3, /* Level of failure */
- array_id : 4, /* Array identification */
- op : 4, /* Type of
- * operation that
- * caused the machine
- * check.
- */
- way : 6, /* Way of structure */
- wv : 1, /* way valid */
- xv : 1, /* index valid */
- reserved1 : 8,
- index : 8, /* Index or set of the uarch
- * structure that failed.
- */
- reserved2 : 24,
-
- is : 1, /* instruction set (1 == ia32) */
- iv : 1, /* instruction set field valid */
- pl : 2, /* privilege level */
- pv : 1, /* privilege level field valid */
- mcc : 1, /* Machine check corrected */
- tv : 1, /* Target address
- * structure is valid
- */
- rq : 1, /* Requester identifier
- * structure is valid
- */
- rp : 1, /* Responder identifier
- * structure is valid
- */
- pi : 1; /* Precise instruction pointer
- * structure is valid
- */
-} pal_uarch_check_info_t;
-
-typedef union pal_mc_error_info_u {
- u64 pmei_data;
- pal_processor_state_info_t pme_processor;
- pal_cache_check_info_t pme_cache;
- pal_tlb_check_info_t pme_tlb;
- pal_bus_check_info_t pme_bus;
- pal_reg_file_check_info_t pme_reg_file;
- pal_uarch_check_info_t pme_uarch;
-} pal_mc_error_info_t;
-
-#define pmci_proc_unknown_check pme_processor.uc
-#define pmci_proc_bus_check pme_processor.bc
-#define pmci_proc_tlb_check pme_processor.tc
-#define pmci_proc_cache_check pme_processor.cc
-#define pmci_proc_dynamic_state_size pme_processor.dsize
-#define pmci_proc_gpr_valid pme_processor.gr
-#define pmci_proc_preserved_bank0_gpr_valid pme_processor.b0
-#define pmci_proc_preserved_bank1_gpr_valid pme_processor.b1
-#define pmci_proc_fp_valid pme_processor.fp
-#define pmci_proc_predicate_regs_valid pme_processor.pr
-#define pmci_proc_branch_regs_valid pme_processor.br
-#define pmci_proc_app_regs_valid pme_processor.ar
-#define pmci_proc_region_regs_valid pme_processor.rr
-#define pmci_proc_translation_regs_valid pme_processor.tr
-#define pmci_proc_debug_regs_valid pme_processor.dr
-#define pmci_proc_perf_counters_valid pme_processor.pc
-#define pmci_proc_control_regs_valid pme_processor.cr
-#define pmci_proc_machine_check_expected pme_processor.ex
-#define pmci_proc_machine_check_corrected pme_processor.cm
-#define pmci_proc_rse_valid pme_processor.rs
-#define pmci_proc_machine_check_or_init pme_processor.in
-#define pmci_proc_dynamic_state_valid pme_processor.dy
-#define pmci_proc_operation pme_processor.op
-#define pmci_proc_trap_lost pme_processor.tl
-#define pmci_proc_hardware_damage pme_processor.hd
-#define pmci_proc_uncontained_storage_damage pme_processor.us
-#define pmci_proc_machine_check_isolated pme_processor.ci
-#define pmci_proc_continuable pme_processor.co
-#define pmci_proc_storage_intergrity_synced pme_processor.sy
-#define pmci_proc_min_state_save_area_regd pme_processor.mn
-#define pmci_proc_distinct_multiple_errors pme_processor.me
-#define pmci_proc_pal_attempted_rendezvous pme_processor.ra
-#define pmci_proc_pal_rendezvous_complete pme_processor.rz
-
-
-#define pmci_cache_level pme_cache.level
-#define pmci_cache_line_state pme_cache.mesi
-#define pmci_cache_line_state_valid pme_cache.mv
-#define pmci_cache_line_index pme_cache.index
-#define pmci_cache_instr_cache_fail pme_cache.ic
-#define pmci_cache_data_cache_fail pme_cache.dc
-#define pmci_cache_line_tag_fail pme_cache.tl
-#define pmci_cache_line_data_fail pme_cache.dl
-#define pmci_cache_operation pme_cache.op
-#define pmci_cache_way_valid pme_cache.wv
-#define pmci_cache_target_address_valid pme_cache.tv
-#define pmci_cache_way pme_cache.way
-#define pmci_cache_mc pme_cache.mc
-
-#define pmci_tlb_instr_translation_cache_fail pme_tlb.itc
-#define pmci_tlb_data_translation_cache_fail pme_tlb.dtc
-#define pmci_tlb_instr_translation_reg_fail pme_tlb.itr
-#define pmci_tlb_data_translation_reg_fail pme_tlb.dtr
-#define pmci_tlb_translation_reg_slot pme_tlb.tr_slot
-#define pmci_tlb_mc pme_tlb.mc
-
-#define pmci_bus_status_info pme_bus.bsi
-#define pmci_bus_req_address_valid pme_bus.rq
-#define pmci_bus_resp_address_valid pme_bus.rp
-#define pmci_bus_target_address_valid pme_bus.tv
-#define pmci_bus_error_severity pme_bus.sev
-#define pmci_bus_transaction_type pme_bus.type
-#define pmci_bus_cache_cache_transfer pme_bus.cc
-#define pmci_bus_transaction_size pme_bus.size
-#define pmci_bus_internal_error pme_bus.ib
-#define pmci_bus_external_error pme_bus.eb
-#define pmci_bus_mc pme_bus.mc
-
-/*
- * NOTE: this min_state_save area struct only includes the 1 KB
- * architectural state save area.  The other 3 KB is scratch space
- * for PAL.
- */
-
-typedef struct pal_min_state_area_s {
- u64 pmsa_nat_bits; /* nat bits for saved GRs */
- u64 pmsa_gr[15]; /* GR1 - GR15 */
- u64 pmsa_bank0_gr[16]; /* GR16 - GR31 */
- u64 pmsa_bank1_gr[16]; /* GR16 - GR31 */
- u64 pmsa_pr; /* predicate registers */
- u64 pmsa_br0; /* branch register 0 */
- u64 pmsa_rsc; /* ar.rsc */
- u64 pmsa_iip; /* cr.iip */
- u64 pmsa_ipsr; /* cr.ipsr */
- u64 pmsa_ifs; /* cr.ifs */
- u64 pmsa_xip; /* previous iip */
- u64 pmsa_xpsr; /* previous psr */
- u64 pmsa_xfs; /* previous ifs */
- u64 pmsa_br1; /* branch register 1 */
- u64 pmsa_reserved[70]; /* pal_min_state_area should total to 1KB */
-} pal_min_state_area_t;
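
The reserved padding makes the struct total exactly 1 KB: 1 + 15 + 16 + 16 + 10 + 70 = 128 eight-byte words. A compile-time check, as a sketch:

    /* fails to build if a field is added without shrinking pmsa_reserved */
    typedef char pmsa_size_check[sizeof(pal_min_state_area_t) == 1024 ? 1 : -1];
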
-
-
-struct ia64_pal_retval {
- /*
- * A zero status value indicates call completed without error.
- * A negative status value indicates reason of call failure.
- * A positive status value indicates success but an
- * informational value should be printed (e.g., "reboot for
- * change to take effect").
- */
- s64 status;
- u64 v0;
- u64 v1;
- u64 v2;
-};
-
-/*
- * Note: Currently unused PAL arguments are generally labeled
- * "reserved" so the value specified in the PAL documentation
- * (generally 0) MUST be passed. Reserved parameters are not optional
- * parameters.
- */
-extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64);
-extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
-extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
-extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
-extern void ia64_save_scratch_fpregs (struct ia64_fpreg *);
-extern void ia64_load_scratch_fpregs (struct ia64_fpreg *);
-
-#define PAL_CALL(iprv,a0,a1,a2,a3) do { \
- struct ia64_fpreg fr[6]; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- iprv = ia64_pal_call_static(a0, a1, a2, a3); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
-} while (0)
-
-#define PAL_CALL_STK(iprv,a0,a1,a2,a3) do { \
- struct ia64_fpreg fr[6]; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- iprv = ia64_pal_call_stacked(a0, a1, a2, a3); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
-} while (0)
-
-#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) do { \
- struct ia64_fpreg fr[6]; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- iprv = ia64_pal_call_phys_static(a0, a1, a2, a3); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
-} while (0)
-
-#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) do { \
- struct ia64_fpreg fr[6]; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- ia64_save_scratch_fpregs(fr); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- ia64_load_scratch_fpregs(fr); \
-} while (0)
-
-typedef int (*ia64_pal_handler) (u64, ...);
-extern ia64_pal_handler ia64_pal;
-extern void ia64_pal_handler_init (void *);
-
-extern ia64_pal_handler ia64_pal;
-
-extern pal_cache_config_info_t l0d_cache_config_info;
-extern pal_cache_config_info_t l0i_cache_config_info;
-extern pal_cache_config_info_t l1_cache_config_info;
-extern pal_cache_config_info_t l2_cache_config_info;
-
-extern pal_cache_protection_info_t l0d_cache_protection_info;
-extern pal_cache_protection_info_t l0i_cache_protection_info;
-extern pal_cache_protection_info_t l1_cache_protection_info;
-extern pal_cache_protection_info_t l2_cache_protection_info;
-
-extern pal_cache_config_info_t pal_cache_config_info_get(pal_cache_level_t,
- pal_cache_type_t);
-
-extern pal_cache_protection_info_t pal_cache_protection_info_get(pal_cache_level_t,
- pal_cache_type_t);
-
-
-extern void pal_error(int);
-
-
-/* Useful wrappers for the current list of pal procedures */
-
-typedef union pal_bus_features_u {
- u64 pal_bus_features_val;
- struct {
- u64 pbf_reserved1 : 29;
- u64 pbf_req_bus_parking : 1;
- u64 pbf_bus_lock_mask : 1;
- u64 pbf_enable_half_xfer_rate : 1;
- u64 pbf_reserved2 : 20;
- u64 pbf_enable_shared_line_replace : 1;
- u64 pbf_enable_exclusive_line_replace : 1;
- u64 pbf_disable_xaction_queueing : 1;
- u64 pbf_disable_resp_err_check : 1;
- u64 pbf_disable_berr_check : 1;
- u64 pbf_disable_bus_req_internal_err_signal : 1;
- u64 pbf_disable_bus_req_berr_signal : 1;
- u64 pbf_disable_bus_init_event_check : 1;
- u64 pbf_disable_bus_init_event_signal : 1;
- u64 pbf_disable_bus_addr_err_check : 1;
- u64 pbf_disable_bus_addr_err_signal : 1;
- u64 pbf_disable_bus_data_err_check : 1;
- } pal_bus_features_s;
-} pal_bus_features_u_t;
-
-extern void pal_bus_features_print (u64);
-
-/* Provide information about configurable processor bus features */
-static inline s64
-ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
- pal_bus_features_u_t *features_status,
- pal_bus_features_u_t *features_control)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
- if (features_avail)
- features_avail->pal_bus_features_val = iprv.v0;
- if (features_status)
- features_status->pal_bus_features_val = iprv.v1;
- if (features_control)
- features_control->pal_bus_features_val = iprv.v2;
- return iprv.status;
-}
-
-/* Enables/disables specific processor bus features */
-static inline s64
-ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
- return iprv.status;
-}
-
-/* Get detailed cache information */
-static inline s64
-ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0);
-
- if (iprv.status == 0) {
- conf->pcci_status = iprv.status;
- conf->pcci_info_1.pcci1_data = iprv.v0;
- conf->pcci_info_2.pcci2_data = iprv.v1;
- conf->pcci_reserved = iprv.v2;
- }
- return iprv.status;
-
-}
-
-/* Get detailed cache protection information */
-static inline s64
-ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0);
-
- if (iprv.status == 0) {
- prot->pcpi_status = iprv.status;
- prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff;
- prot->pcp_info[1].pcpi_data = iprv.v0 >> 32;
- prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff;
- prot->pcp_info[3].pcpi_data = iprv.v1 >> 32;
- prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff;
- prot->pcp_info[5].pcpi_data = iprv.v2 >> 32;
- }
- return iprv.status;
-}
-
-/*
- * Flush the processor instruction or data caches. *PROGRESS must be
- * initialized to zero before calling this for the first time.
- */
-static inline s64
-ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
- if (vector)
- *vector = iprv.v0;
- *progress = iprv.v1;
- return iprv.status;
-}
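
A minimal usage sketch (an assumed call pattern; on a non-zero status the updated *progress can be passed back in to resume the flush):

    u64 progress = 0;       /* must start at zero on the first call */
    u64 vector;
    s64 status;

    status = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
                                  PAL_CACHE_FLUSH_INVALIDATE,
                                  &progress, &vector);
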
-
-
-/* Initialize the processor controlled caches */
-static inline s64
-ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
- return iprv.status;
-}
-
-/* Initialize the tags and data of a data or unified cache line of a
- * processor-controlled cache to known values without the availability
- * of backing memory.
- */
-static inline s64
-ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0);
- return iprv.status;
-}
-
-
-/* Read the data and tag of a processor controlled cache line for diags */
-static inline s64
-ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_PHYS_STK(iprv, PAL_CACHE_READ, line_id.pclid_data,
- physical_addr, 0);
- return iprv.status;
-}
-
-/* Return summary information about the hierarchy of caches controlled by the processor */
-static inline s64
-ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0);
- if (cache_levels)
- *cache_levels = iprv.v0;
- if (unique_caches)
- *unique_caches = iprv.v1;
- return iprv.status;
-}
-
-/* Write the data and tag of a processor-controlled cache line for diags */
-static inline s64
-ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_PHYS_STK(iprv, PAL_CACHE_WRITE, line_id.pclid_data,
- physical_addr, data);
- return iprv.status;
-}
-
-
-/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */
-static inline s64
-ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
- u64 *buffer_size, u64 *buffer_align)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics);
- if (buffer_size)
- *buffer_size = iprv.v0;
- if (buffer_align)
- *buffer_align = iprv.v1;
- return iprv.status;
-}
-
-/* Copy relocatable PAL procedures from ROM to memory */
-static inline s64
-ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor);
- if (pal_proc_offset)
- *pal_proc_offset = iprv.v0;
- return iprv.status;
-}
-
-/* Return the number of instruction and data debug register pairs */
-static inline s64
-ia64_pal_debug_info (u64 *inst_regs, u64 *data_regs)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0);
- if (inst_regs)
- *inst_regs = iprv.v0;
- if (data_regs)
- *data_regs = iprv.v1;
-
- return iprv.status;
-}
-
-#ifdef TBD
-/* Switch from IA64-system environment to IA-32 system environment */
-static inline s64
-ia64_pal_enter_ia32_env (ia32_env1, ia32_env2, ia32_env3)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3);
- return iprv.status;
-}
-#endif
-
-/* Get unique geographical address of this processor on its bus */
-static inline s64
-ia64_pal_fixed_addr (u64 *global_unique_addr)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0);
- if (global_unique_addr)
- *global_unique_addr = iprv.v0;
- return iprv.status;
-}
-
-/* Get base frequency of the platform if generated by the processor */
-static inline s64
-ia64_pal_freq_base (u64 *platform_base_freq)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);
- if (platform_base_freq)
- *platform_base_freq = iprv.v0;
- return iprv.status;
-}
-
-/*
- * Get the ratios of the processor frequency, bus frequency and interval
- * timer to the base frequency of the platform
- */
-static inline s64
-ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
- struct pal_freq_ratio *itc_ratio)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_FREQ_RATIOS, 0, 0, 0);
- if (proc_ratio)
- *(u64 *)proc_ratio = iprv.v0;
- if (bus_ratio)
- *(u64 *)bus_ratio = iprv.v1;
- if (itc_ratio)
- *(u64 *)itc_ratio = iprv.v2;
- return iprv.status;
-}
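
Combined with PAL_FREQ_BASE, the ratios yield absolute frequencies; a sketch with a hypothetical helper and no error handling:

    static u64 processor_freq_hz(void)
    {
            u64 base;
            struct pal_freq_ratio proc, bus, itc;

            ia64_pal_freq_base(&base);
            ia64_pal_freq_ratios(&proc, &bus, &itc);
            /* frequency = platform base * num / den */
            return base * proc.num / proc.den;
    }
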
-
-/*
- * Get the current hardware resource sharing policy of the processor
- */
-static inline s64
-ia64_pal_get_hw_policy (u64 proc_num, u64 *cur_policy, u64 *num_impacted,
- u64 *la)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_GET_HW_POLICY, proc_num, 0, 0);
- if (cur_policy)
- *cur_policy = iprv.v0;
- if (num_impacted)
- *num_impacted = iprv.v1;
- if (la)
- *la = iprv.v2;
- return iprv.status;
-}
-
-/* Make the processor enter HALT or one of the implementation-dependent low
- * power states where prefetching and execution are suspended and cache and
- * TLB coherency is not maintained.
- */
-static inline s64
-ia64_pal_halt (u64 halt_state)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0);
- return iprv.status;
-}
-
-typedef union pal_power_mgmt_info_u {
- u64 ppmi_data;
- struct {
- u64 exit_latency : 16,
- entry_latency : 16,
- power_consumption : 28,
- im : 1,
- co : 1,
- reserved : 2;
- } pal_power_mgmt_info_s;
-} pal_power_mgmt_info_u_t;
-
-/* Return information about the processor's optional power management capabilities. */
-static inline s64
-ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
- return iprv.status;
-}
-
-/* Get the current P-state information */
-static inline s64
-ia64_pal_get_pstate (u64 *pstate_index, unsigned long type)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_GET_PSTATE, type, 0, 0);
- *pstate_index = iprv.v0;
- return iprv.status;
-}
-
-/* Set the P-state */
-static inline s64
-ia64_pal_set_pstate (u64 pstate_index)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_SET_PSTATE, pstate_index, 0, 0);
- return iprv.status;
-}
-
-/* Processor branding information*/
-static inline s64
-ia64_pal_get_brand_info (char *brand_info)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_BRAND_INFO, 0, (u64)brand_info, 0);
- return iprv.status;
-}
-
-/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
- * suspended, but cache and TLB coherency is maintained.
- */
-static inline s64
-ia64_pal_halt_light (void)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0);
- return iprv.status;
-}
-
-/* Clear all the processor error logging registers and reset the indicator that allows
- * the error logging registers to be written. This procedure also checks the pending
- * machine check bit and pending INIT bit and reports their states.
- */
-static inline s64
-ia64_pal_mc_clear_log (u64 *pending_vector)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0);
- if (pending_vector)
- *pending_vector = iprv.v0;
- return iprv.status;
-}
-
-/* Ensure that all outstanding transactions in a processor are completed or that any
- * MCA due to these outstanding transactions is taken.
- */
-static inline s64
-ia64_pal_mc_drain (void)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0);
- return iprv.status;
-}
-
-/* Return the machine check dynamic processor state */
-static inline s64
-ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
- if (size)
- *size = iprv.v0;
- if (pds)
- *pds = iprv.v1;
- return iprv.status;
-}
-
-/* Return processor machine check information */
-static inline s64
-ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0);
- if (size)
- *size = iprv.v0;
- if (error_info)
- *error_info = iprv.v1;
- return iprv.status;
-}
-
-/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
- * attempt to correct any expected machine checks.
- */
-static inline s64
-ia64_pal_mc_expected (u64 expected, u64 *previous)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0);
- if (previous)
- *previous = iprv.v0;
- return iprv.status;
-}
-
-/* Register a platform dependent location with PAL to which it can save
- * minimal processor state in the event of a machine check or initialization
- * event.
- */
-static inline s64
-ia64_pal_mc_register_mem (u64 physical_addr)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
- return iprv.status;
-}
-
-/* Restore minimal architectural processor state, set CMC interrupt if necessary
- * and resume execution
- */
-static inline s64
-ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0);
- return iprv.status;
-}
-
-/* Return the memory attributes implemented by the processor */
-static inline s64
-ia64_pal_mem_attrib (u64 *mem_attrib)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0);
- if (mem_attrib)
- *mem_attrib = iprv.v0 & 0xff;
- return iprv.status;
-}
-
-/* Return the amount of memory needed for the second phase of processor
- * self-test and the required alignment of memory.
- */
-static inline s64
-ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0);
- if (bytes_needed)
- *bytes_needed = iprv.v0;
- if (alignment)
- *alignment = iprv.v1;
- return iprv.status;
-}
-
-typedef union pal_perf_mon_info_u {
- u64 ppmi_data;
- struct {
- u64 generic : 8,
- width : 8,
- cycles : 8,
- retired : 8,
- reserved : 32;
- } pal_perf_mon_info_s;
-} pal_perf_mon_info_u_t;
-
-/* Return the performance monitor information about what can be counted
- * and how to configure the monitors to count the desired events.
- */
-static inline s64
-ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_PERF_MON_INFO, (unsigned long) pm_buffer, 0, 0);
- if (pm_info)
- pm_info->ppmi_data = iprv.v0;
- return iprv.status;
-}
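
A minimal sketch of decoding the union above; it assumes a 16 * u64 mask
area for pm_buffer, which PAL_PERF_MON_INFO fills with the
implemented-register masks (their layout is in the PAL specification and
is not needed just to read the counter geometry):

static void
example_pmu_geometry (void)
{
	u64 pm_buffer[16];		/* mask area filled in by PAL */
	pal_perf_mon_info_u_t pm_info;

	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0)
		return;
	printk("PMU: generic=%u width=%u cycles=%u retired=%u\n",
	       (unsigned) pm_info.pal_perf_mon_info_s.generic,
	       (unsigned) pm_info.pal_perf_mon_info_s.width,
	       (unsigned) pm_info.pal_perf_mon_info_s.cycles,
	       (unsigned) pm_info.pal_perf_mon_info_s.retired);
}
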
-
-/* Specifies the physical address of the processor interrupt block
- * and I/O port space.
- */
-static inline s64
-ia64_pal_platform_addr (u64 type, u64 physical_addr)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_PLATFORM_ADDR, type, physical_addr, 0);
- return iprv.status;
-}
-
-/* Set the SAL PMI entrypoint in memory */
-static inline s64
-ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_PMI_ENTRYPOINT, sal_pmi_entry_addr, 0, 0);
- return iprv.status;
-}
-
-struct pal_features_s;
-/* Provide information about configurable processor features */
-static inline s64
-ia64_pal_proc_get_features (u64 *features_avail,
- u64 *features_status,
- u64 *features_control)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0);
- if (iprv.status == 0) {
- *features_avail = iprv.v0;
- *features_status = iprv.v1;
- *features_control = iprv.v2;
- }
- return iprv.status;
-}
-
-/* Enable/disable processor dependent features */
-static inline s64
-ia64_pal_proc_set_features (u64 feature_select)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
- return iprv.status;
-}
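
The get/set pair is typically used read-modify-write style: read the
availability/status/control words, flip a bit in the control word, and
write the result back. A hedged sketch (whether toggling via XOR is the
right update policy is feature-specific; consult the PAL documentation
for the bit in question):

static s64
example_toggle_feature (u64 bit)
{
	u64 avail, enabled, control;
	s64 status;

	status = ia64_pal_proc_get_features(&avail, &enabled, &control);
	if (status != 0)
		return status;
	if (!(avail & bit))
		return -1;			/* feature not implemented */
	return ia64_pal_proc_set_features(control ^ bit);
}
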
-
-/*
- * Put everything in a struct so we avoid the global offset table whenever
- * possible.
- */
-typedef struct ia64_ptce_info_s {
- u64 base;
- u32 count[2];
- u32 stride[2];
-} ia64_ptce_info_t;
-
-/* Return the information required for the architected loop used to purge
- * (initialize) the entire TC
- */
-static inline s64
-ia64_get_ptce (ia64_ptce_info_t *ptce)
-{
- struct ia64_pal_retval iprv;
-
- if (!ptce)
- return -1;
-
- PAL_CALL(iprv, PAL_PTCE_INFO, 0, 0, 0);
- if (iprv.status == 0) {
- ptce->base = iprv.v0;
- ptce->count[0] = iprv.v1 >> 32;
- ptce->count[1] = iprv.v1 & 0xffffffff;
- ptce->stride[0] = iprv.v2 >> 32;
- ptce->stride[1] = iprv.v2 & 0xffffffff;
- }
- return iprv.status;
-}
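
This feeds the classic architected purge loop (Linux's
local_flush_tlb_all() is the canonical user): walk the two-dimensional
(count, stride) space and issue ptc.e at each step. A sketch, assuming
the ia64_ptce() intrinsic and a caller that runs with interrupts
disabled:

static void
example_purge_all_tc (void)
{
	ia64_ptce_info_t ptce;
	unsigned long addr, i, j;

	if (ia64_get_ptce(&ptce) != 0)
		return;
	addr = ptce.base;
	for (i = 0; i < ptce.count[0]; i++) {
		for (j = 0; j < ptce.count[1]; j++) {
			ia64_ptce(addr);	/* purge one TC chunk */
			addr += ptce.stride[1];
		}
		addr += ptce.stride[0];
	}
	ia64_srlz_i();				/* serialize after the purges */
}
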
-
-/* Return info about implemented application and control registers. */
-static inline s64
-ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_REGISTER_INFO, info_request, 0, 0);
- if (reg_info_1)
- *reg_info_1 = iprv.v0;
- if (reg_info_2)
- *reg_info_2 = iprv.v1;
- return iprv.status;
-}
-
-typedef union pal_hints_u {
- u64 ph_data;
- struct {
- u64 si : 1,
- li : 1,
- reserved : 62;
- } pal_hints_s;
-} pal_hints_u_t;
-
-/* Return information about the register stack and RSE for this processor
- * implementation.
- */
-static inline s64
-ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0);
- if (num_phys_stacked)
- *num_phys_stacked = iprv.v0;
- if (hints)
- hints->ph_data = iprv.v1;
- return iprv.status;
-}
-
-/*
- * Set the current hardware resource sharing policy of the processor
- */
-static inline s64
-ia64_pal_set_hw_policy (u64 policy)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_SET_HW_POLICY, policy, 0, 0);
- return iprv.status;
-}
-
-/* Cause the processor to enter SHUTDOWN state, where prefetching and execution are
- * suspended, but cache and TLB coherency is maintained.
- * This is usually called in IA-32 mode.
- */
-static inline s64
-ia64_pal_shutdown (void)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_SHUTDOWN, 0, 0, 0);
- return iprv.status;
-}
-
-/* Perform the second phase of processor self-test. */
-static inline s64
-ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_TEST_PROC, test_addr, test_size, attributes);
- if (self_test_state)
- *self_test_state = iprv.v0;
- return iprv.status;
-}
-
-typedef union pal_version_u {
- u64 pal_version_val;
- struct {
- u64 pv_pal_b_rev : 8;
- u64 pv_pal_b_model : 8;
- u64 pv_reserved1 : 8;
- u64 pv_pal_vendor : 8;
- u64 pv_pal_a_rev : 8;
- u64 pv_pal_a_model : 8;
- u64 pv_reserved2 : 16;
- } pal_version_s;
-} pal_version_u_t;
-
-
-/*
- * Return PAL version information. While the documentation states that
- * PAL_VERSION can be called in either physical or virtual mode, some
- * implementations only allow physical calls. We don't call it very often,
- * so the overhead isn't worth eliminating.
- */
-static inline s64
-ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0);
- if (pal_min_version)
- pal_min_version->pal_version_val = iprv.v0;
-
- if (pal_cur_version)
- pal_cur_version->pal_version_val = iprv.v1;
-
- return iprv.status;
-}
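
A small sketch of decoding the union after the call; the casts are there
because the fields are u64 bitfields:

static void
example_pal_version (void)
{
	pal_version_u_t min_ver, cur_ver;

	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
		return;
	printk("PAL_A model %u rev %u, PAL_B model %u rev %u, vendor %u\n",
	       (unsigned) cur_ver.pal_version_s.pv_pal_a_model,
	       (unsigned) cur_ver.pal_version_s.pv_pal_a_rev,
	       (unsigned) cur_ver.pal_version_s.pv_pal_b_model,
	       (unsigned) cur_ver.pal_version_s.pv_pal_b_rev,
	       (unsigned) cur_ver.pal_version_s.pv_pal_vendor);
}
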
-
-typedef union pal_tc_info_u {
- u64 pti_val;
- struct {
- u64 num_sets : 8,
- associativity : 8,
- num_entries : 16,
- pf : 1,
- unified : 1,
- reduce_tr : 1,
- reserved : 29;
- } pal_tc_info_s;
-} pal_tc_info_u_t;
-
-#define tc_reduce_tr pal_tc_info_s.reduce_tr
-#define tc_unified pal_tc_info_s.unified
-#define tc_pf pal_tc_info_s.pf
-#define tc_num_entries pal_tc_info_s.num_entries
-#define tc_associativity pal_tc_info_s.associativity
-#define tc_num_sets pal_tc_info_s.num_sets
-
-
-/* Return information about the virtual memory characteristics of the processor
- * implementation.
- */
-static inline s64
-ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_pages)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_VM_INFO, tc_level, tc_type, 0);
- if (tc_info)
- tc_info->pti_val = iprv.v0;
- if (tc_pages)
- *tc_pages = iprv.v1;
- return iprv.status;
-}
-
-/* Get page size information about the virtual memory characteristics of the processor
- * implementation.
- */
-static inline s64
-ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0);
- if (tr_pages)
- *tr_pages = iprv.v0;
- if (vw_pages)
- *vw_pages = iprv.v1;
- return iprv.status;
-}
-
-typedef union pal_vm_info_1_u {
- u64 pvi1_val;
- struct {
- u64 vw : 1,
- phys_add_size : 7,
- key_size : 8,
- max_pkr : 8,
- hash_tag_id : 8,
- max_dtr_entry : 8,
- max_itr_entry : 8,
- max_unique_tcs : 8,
- num_tc_levels : 8;
- } pal_vm_info_1_s;
-} pal_vm_info_1_u_t;
-
-#define PAL_MAX_PURGES		0xFFFF	/* all ones means unlimited */
-
-typedef union pal_vm_info_2_u {
- u64 pvi2_val;
- struct {
- u64 impl_va_msb : 8,
- rid_size : 8,
- max_purges : 16,
- reserved : 32;
- } pal_vm_info_2_s;
-} pal_vm_info_2_u_t;
-
-/* Get summary information about the virtual memory characteristics of the processor
- * implementation.
- */
-static inline s64
-ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0);
- if (vm_info_1)
- vm_info_1->pvi1_val = iprv.v0;
- if (vm_info_2)
- vm_info_2->pvi2_val = iprv.v1;
- return iprv.status;
-}
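
PAL_VM_SUMMARY is how the OS learns its virtual-address and region-ID
widths at boot. A minimal sketch, assuming impl_va_msb names the highest
implemented VA bit (so the width is msb + 1):

static s64
example_vm_limits (u64 *va_bits, u64 *rid_bits)
{
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	s64 status;

	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);
	if (status != 0)
		return status;
	*va_bits  = vm_info_2.pal_vm_info_2_s.impl_va_msb + 1;
	*rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;
	return 0;
}
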
-
-typedef union pal_itr_valid_u {
- u64 piv_val;
- struct {
- u64 access_rights_valid : 1,
- priv_level_valid : 1,
- dirty_bit_valid : 1,
- mem_attr_valid : 1,
- reserved : 60;
- } pal_tr_valid_s;
-} pal_tr_valid_u_t;
-
-/* Read a translation register */
-static inline s64
-ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type,(u64)ia64_tpa(tr_buffer));
- if (tr_valid)
- tr_valid->piv_val = iprv.v0;
- return iprv.status;
-}
-
-/*
- * PAL_PREFETCH_VISIBILITY transaction types
- */
-#define PAL_VISIBILITY_VIRTUAL 0
-#define PAL_VISIBILITY_PHYSICAL 1
-
-/*
- * PAL_PREFETCH_VISIBILITY return codes
- */
-#define PAL_VISIBILITY_OK 1
-#define PAL_VISIBILITY_OK_REMOTE_NEEDED 0
-#define PAL_VISIBILITY_INVAL_ARG -2
-#define PAL_VISIBILITY_ERROR -3
-
-static inline s64
-ia64_pal_prefetch_visibility (s64 trans_type)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_PREFETCH_VISIBILITY, trans_type, 0, 0);
- return iprv.status;
-}
-
-/* data structure for getting information on logical to physical mappings */
-typedef union pal_log_overview_u {
- struct {
- u64 num_log :16, /* Total number of logical
- * processors on this die
- */
- tpc :8, /* Threads per core */
- reserved3 :8, /* Reserved */
- cpp :8, /* Cores per processor */
- reserved2 :8, /* Reserved */
- ppid :8, /* Physical processor ID */
- reserved1 :8; /* Reserved */
- } overview_bits;
- u64 overview_data;
-} pal_log_overview_t;
-
-typedef union pal_proc_n_log_info1_u{
- struct {
- u64 tid :16, /* Thread id */
- reserved2 :16, /* Reserved */
- cid :16, /* Core id */
- reserved1 :16; /* Reserved */
- } ppli1_bits;
- u64 ppli1_data;
-} pal_proc_n_log_info1_t;
-
-typedef union pal_proc_n_log_info2_u {
- struct {
- u64 la :16, /* Logical address */
- reserved :48; /* Reserved */
- } ppli2_bits;
- u64 ppli2_data;
-} pal_proc_n_log_info2_t;
-
-typedef struct pal_logical_to_physical_s
-{
- pal_log_overview_t overview;
- pal_proc_n_log_info1_t ppli1;
- pal_proc_n_log_info2_t ppli2;
-} pal_logical_to_physical_t;
-
-#define overview_num_log overview.overview_bits.num_log
-#define overview_tpc overview.overview_bits.tpc
-#define overview_cpp overview.overview_bits.cpp
-#define overview_ppid overview.overview_bits.ppid
-#define log1_tid ppli1.ppli1_bits.tid
-#define log1_cid ppli1.ppli1_bits.cid
-#define log2_la ppli2.ppli2_bits.la
-
-/* Get information on logical to physical processor mappings. */
-static inline s64
-ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL(iprv, PAL_LOGICAL_TO_PHYSICAL, proc_number, 0, 0);
-
- if (iprv.status == PAL_STATUS_SUCCESS)
- {
- mapping->overview.overview_data = iprv.v0;
- mapping->ppli1.ppli1_data = iprv.v1;
- mapping->ppli2.ppli2_data = iprv.v2;
- }
-
- return iprv.status;
-}
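
A sketch that uses the accessor macros above to place one logical
processor in the core/thread topology; casts again because the fields
are u64 bitfields:

static void
example_topology (u64 proc)
{
	pal_logical_to_physical_t map;

	if (ia64_pal_logical_to_phys(proc, &map) != PAL_STATUS_SUCCESS)
		return;
	printk("lcpu %lu: tid %u cid %u, %u threads/core, %u cores/die\n",
	       proc,
	       (unsigned) map.log1_tid, (unsigned) map.log1_cid,
	       (unsigned) map.overview_tpc, (unsigned) map.overview_cpp);
}
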
-
-typedef struct pal_cache_shared_info_s
-{
- u64 num_shared;
- pal_proc_n_log_info1_t ppli1;
- pal_proc_n_log_info2_t ppli2;
-} pal_cache_shared_info_t;
-
-/* Get information on which logical processors share a given cache level. */
-static inline s64
-ia64_pal_cache_shared_info(u64 level,
- u64 type,
- u64 proc_number,
- pal_cache_shared_info_t *info)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL(iprv, PAL_CACHE_SHARED_INFO, level, type, proc_number);
-
- if (iprv.status == PAL_STATUS_SUCCESS) {
- info->num_shared = iprv.v0;
- info->ppli1.ppli1_data = iprv.v1;
- info->ppli2.ppli2_data = iprv.v2;
- }
-
- return iprv.status;
-}
-#ifdef XEN
-#include <asm/vmx_pal.h>
-#endif
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IA64_PAL_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/pci.h b/xen/include/asm-ia64/linux-xen/asm/pci.h
deleted file mode 100644
index 95ffd407c8..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/pci.h
+++ /dev/null
@@ -1,185 +0,0 @@
-#ifndef _ASM_IA64_PCI_H
-#define _ASM_IA64_PCI_H
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#ifdef XEN
-#include <linux/ioport.h>
-#endif
-
-#include <asm/io.h>
-#include <asm/scatterlist.h>
-
-/*
- * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
- * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
- * loader.
- */
-#define pcibios_assign_all_busses() 0
-#define pcibios_scan_all_fns(a, b) 0
-
-#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM 0x10000000
-
-void pcibios_config_init(void);
-
-struct pci_dev;
-
-#ifdef XEN
-struct arch_pci_dev {};
-#endif
-
-/*
- * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
- * between device bus addresses and CPU physical addresses. Platforms with a hardware I/O
- * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
- * network device layers. Platforms with separate bus address spaces _must_ turn this off
- * and provide a device DMA mapping implementation that takes care of the necessary
- * address translation.
- *
- * For now, the ia64 platforms which may have separate/multiple bus address spaces all
- * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
- * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
-
-#ifndef XEN
-static inline void
-pcibios_set_master (struct pci_dev *dev)
-{
- /* No special bus mastering setup handling */
-}
-
-static inline void
-pcibios_penalize_isa_irq (int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
-#define HAVE_ARCH_PCI_MWI 1
-extern int pcibios_prep_mwi (struct pci_dev *);
-
-#include <asm-generic/pci-dma-compat.h>
-
-/* pci_unmap_{single,page} is not a nop, thus... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
- dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
- __u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME) \
- ((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
- (((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME) \
- ((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
- (((PTR)->LEN_NAME) = (VAL))
-
-/* The ia64 platform always supports 64-bit addressing. */
-#define pci_dac_dma_supported(pci_dev, mask) (1)
-#define pci_dac_page_to_dma(dev,pg,off,dir) ((dma_addr_t) page_to_bus(pg) + (off))
-#define pci_dac_dma_to_page(dev,dma_addr) (virt_to_page(bus_to_virt(dma_addr)))
-#define pci_dac_dma_to_offset(dev,dma_addr) offset_in_page(dma_addr)
-#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir) do { } while (0)
-#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir) do { mb(); } while (0)
-
-#define sg_dma_len(sg) ((sg)->dma_length)
-#define sg_dma_address(sg) ((sg)->dma_address)
-
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
- enum pci_dma_burst_strategy *strat,
- unsigned long *strategy_parameter)
-{
- unsigned long cacheline_size;
- u8 byte;
-
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
- if (byte == 0)
- cacheline_size = 1024;
- else
- cacheline_size = (int) byte * 4;
-
- *strat = PCI_DMA_BURST_MULTIPLE;
- *strategy_parameter = cacheline_size;
-}
-#endif
-
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-#define HAVE_PCI_LEGACY
-extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
- struct vm_area_struct *vma);
-extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
- size_t count);
-extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
- size_t count);
-extern int pci_mmap_legacy_mem(struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma);
-#endif
-
-#define pci_get_legacy_mem platform_pci_get_legacy_mem
-#define pci_legacy_read platform_pci_legacy_read
-#define pci_legacy_write platform_pci_legacy_write
-
-struct pci_window {
- struct resource resource;
- u64 offset;
-};
-
-struct pci_controller {
- void *acpi_handle;
- void *iommu;
- int segment;
- int node; /* nearest node with memory or -1 for global allocation */
-
- unsigned int windows;
- struct pci_window *window;
-
- void *platform_data;
-};
-
-#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
-#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment)
-
-#ifndef XEN
-extern struct pci_ops pci_root_ops;
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- return (pci_domain_nr(bus) != 0);
-}
-
-static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-{
-}
-
-extern void pcibios_resource_to_bus(struct pci_dev *dev,
- struct pci_bus_region *region, struct resource *res);
-
-extern void pcibios_bus_to_resource(struct pci_dev *dev,
- struct resource *res, struct pci_bus_region *region);
-
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
-{
- struct resource *root = NULL;
-
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
- if (res->flags & IORESOURCE_MEM)
- root = &iomem_resource;
-
- return root;
-}
-#endif
-
-#define pcibios_scan_all_fns(a, b) 0
-
-#endif /* _ASM_IA64_PCI_H */
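
A hedged sketch of how the sysdata plumbing above is consumed: given a
struct pci_bus, PCI_CONTROLLER() recovers the ia64 controller, from
which the segment (domain), NUMA node and address windows follow. The
helper name is hypothetical:

static int
example_bus_node (struct pci_bus *bus)
{
	struct pci_controller *ctrl = PCI_CONTROLLER(bus);

	printk("bus %02x: segment %d, node %d, %u windows\n",
	       bus->number, ctrl->segment, ctrl->node, ctrl->windows);
	return ctrl->node;		/* -1 means global allocation */
}
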
diff --git a/xen/include/asm-ia64/linux-xen/asm/percpu.h b/xen/include/asm-ia64/linux-xen/asm/percpu.h
deleted file mode 100644
index 3bd030327c..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/percpu.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef _ASM_IA64_PERCPU_H
-#define _ASM_IA64_PERCPU_H
-
-/*
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
-
-#ifdef __ASSEMBLY__
-# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
-#else /* !__ASSEMBLY__ */
-
-#include <linux/config.h>
-
-#include <linux/threads.h>
-
-#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
-# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__)))
-#else
-# define __SMALL_ADDR_AREA
-#endif
-
-#define DECLARE_PER_CPU(type, name) \
- extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-/* Separate out the type, so (int[3], foo) works. */
-#define __DEFINE_PER_CPU(type, name, suffix) \
- __attribute__((__section__(".data.percpu" #suffix))) \
- __SMALL_ADDR_AREA __typeof__(type) per_cpu_##name
-
-/*
- * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
- * external routine, to avoid include-hell.
- */
-#ifdef CONFIG_SMP
-
-extern unsigned long __per_cpu_offset[NR_CPUS];
-
-/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
-DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
-
-#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
-#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
-#ifdef XEN
-#define per_cpu_addr(var, cpu) (RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
-#endif
-
-extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
-extern void setup_per_cpu_areas (void);
-extern void *per_cpu_init(void);
-#ifdef XEN
-extern void *per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa);
-#endif
-
-#else /* ! SMP */
-
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
-#define __get_cpu_var(var) per_cpu__##var
-#define per_cpu_init() (__phys_per_cpu_start)
-#ifdef XEN
-static inline void *per_cpu_allocate(void *xen_heap_start,
- unsigned long end_in_pa)
-{
- return xen_heap_start;
-}
-#endif
-
-#endif /* SMP */
-
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
-/*
- * Be extremely careful when taking the address of this variable! Due to virtual
- * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
- * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
- * more efficient.
- */
-#define __ia64_per_cpu_var(var) (per_cpu__##var)
-
-DECLARE_PER_CPU(struct vcpu *, fp_owner);
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_IA64_PERCPU_H */
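
A sketch of the usage pattern these macros support, assuming
DEFINE_PER_CPU is the conventional wrapper around __DEFINE_PER_CPU from
the generic headers: each CPU reaches its own copy through
__get_cpu_var(), and another CPU's copy through per_cpu():

DEFINE_PER_CPU(unsigned long, example_counter);

static void
example_bump (void)
{
	__get_cpu_var(example_counter)++;	/* this CPU's copy */
}

static unsigned long
example_read (int cpu)
{
	return per_cpu(example_counter, cpu);	/* cpu's copy */
}
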
diff --git a/xen/include/asm-ia64/linux-xen/asm/perfmon.h b/xen/include/asm-ia64/linux-xen/asm/perfmon.h
deleted file mode 100644
index af189b05cb..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/perfmon.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Copyright (C) 2001-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-
-#ifndef _ASM_IA64_PERFMON_H
-#define _ASM_IA64_PERFMON_H
-
-#ifdef XEN
-#ifndef pt_regs
-#define pt_regs cpu_user_regs
-#endif
-struct cpu_user_regs;
-#endif
-
-/*
- * perfmon commands supported on all CPU models
- */
-#define PFM_WRITE_PMCS 0x01
-#define PFM_WRITE_PMDS 0x02
-#define PFM_READ_PMDS 0x03
-#define PFM_STOP 0x04
-#define PFM_START 0x05
-#define PFM_ENABLE 0x06 /* obsolete */
-#define PFM_DISABLE 0x07 /* obsolete */
-#define PFM_CREATE_CONTEXT 0x08
-#define PFM_DESTROY_CONTEXT 0x09 /* obsolete use close() */
-#define PFM_RESTART 0x0a
-#define PFM_PROTECT_CONTEXT 0x0b /* obsolete */
-#define PFM_GET_FEATURES 0x0c
-#define PFM_DEBUG 0x0d
-#define PFM_UNPROTECT_CONTEXT 0x0e /* obsolete */
-#define PFM_GET_PMC_RESET_VAL 0x0f
-#define PFM_LOAD_CONTEXT 0x10
-#define PFM_UNLOAD_CONTEXT 0x11
-
-/*
- * PMU model specific commands (may not be supported on all PMU models)
- */
-#define PFM_WRITE_IBRS 0x20
-#define PFM_WRITE_DBRS 0x21
-
-/*
- * context flags
- */
-#define PFM_FL_NOTIFY_BLOCK 0x01 /* block task on user level notifications */
-#define PFM_FL_SYSTEM_WIDE 0x02 /* create a system wide context */
-#define PFM_FL_OVFL_NO_MSG 0x80 /* do not post overflow/end messages for notification */
-
-/*
- * event set flags
- */
-#define PFM_SETFL_EXCL_IDLE 0x01 /* exclude idle task (syswide only) XXX: DO NOT USE YET */
-
-/*
- * PMC flags
- */
-#define PFM_REGFL_OVFL_NOTIFY 0x1 /* send notification on overflow */
-#define PFM_REGFL_RANDOM 0x2 /* randomize sampling interval */
-
-/*
- * PMD/PMC/IBR/DBR return flags (ignored on input)
- *
- * Those flags are used on output and must be checked in case EAGAIN is returned
- * by any of the calls using a pfarg_reg_t or pfarg_dbreg_t structure.
- */
-#define PFM_REG_RETFL_NOTAVAIL (1UL<<31) /* set if register is implemented but not available */
-#define PFM_REG_RETFL_EINVAL (1UL<<30) /* set if register entry is invalid */
-#define PFM_REG_RETFL_MASK (PFM_REG_RETFL_NOTAVAIL|PFM_REG_RETFL_EINVAL)
-
-#define PFM_REG_HAS_ERROR(flag) (((flag) & PFM_REG_RETFL_MASK) != 0)
-
-typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type */
-
-/*
- * Request structure used to define a context
- */
-typedef struct {
- pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
- unsigned long ctx_flags; /* noblock/block */
- unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */
- unsigned short ctx_reserved1; /* for future use */
- int ctx_fd; /* return arg: unique identification for context */
-	void		*ctx_smpl_vaddr;	/* return arg: virtual address of sampling buffer, if used */
- unsigned long ctx_reserved2[11];/* for future use */
-} pfarg_context_t;
-
-/*
- * Request structure used to write/read a PMC or PMD
- */
-typedef struct {
- unsigned int reg_num; /* which register */
- unsigned short reg_set; /* event set for this register */
- unsigned short reg_reserved1; /* for future use */
-
- unsigned long reg_value; /* initial pmc/pmd value */
- unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
-
- unsigned long reg_long_reset; /* reset after buffer overflow notification */
- unsigned long reg_short_reset; /* reset after counter overflow */
-
- unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
- unsigned long reg_random_seed; /* seed value when randomization is used */
- unsigned long reg_random_mask; /* bitmask used to limit random value */
- unsigned long reg_last_reset_val;/* return: PMD last reset value */
-
- unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */
- unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
-
- unsigned long reg_reserved2[3]; /* for future use */
-} pfarg_reg_t;
-
-typedef struct {
- unsigned int dbreg_num; /* which debug register */
- unsigned short dbreg_set; /* event set for this register */
- unsigned short dbreg_reserved1; /* for future use */
- unsigned long dbreg_value; /* value for debug register */
- unsigned long dbreg_flags; /* return: dbreg error */
- unsigned long dbreg_reserved2[1]; /* for future use */
-} pfarg_dbreg_t;
-
-typedef struct {
- unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */
- unsigned int ft_reserved; /* reserved for future use */
- unsigned long reserved[4]; /* for future use */
-} pfarg_features_t;
-
-typedef struct {
- pid_t load_pid; /* process to load the context into */
- unsigned short load_set; /* first event set to load */
- unsigned short load_reserved1; /* for future use */
- unsigned long load_reserved2[3]; /* for future use */
-} pfarg_load_t;
-
-typedef struct {
- int msg_type; /* generic message header */
- int msg_ctx_fd; /* generic message header */
- unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */
- unsigned short msg_active_set; /* active set at the time of overflow */
- unsigned short msg_reserved1; /* for future use */
- unsigned int msg_reserved2; /* for future use */
- unsigned long msg_tstamp; /* for perf tuning/debug */
-} pfm_ovfl_msg_t;
-
-typedef struct {
- int msg_type; /* generic message header */
- int msg_ctx_fd; /* generic message header */
- unsigned long msg_tstamp; /* for perf tuning */
-} pfm_end_msg_t;
-
-typedef struct {
- int msg_type; /* type of the message */
- int msg_ctx_fd; /* unique identifier for the context */
- unsigned long msg_tstamp; /* for perf tuning */
-} pfm_gen_msg_t;
-
-#define PFM_MSG_OVFL 1 /* an overflow happened */
-#define PFM_MSG_END 2 /* task to which context was attached ended */
-
-typedef union {
- pfm_ovfl_msg_t pfm_ovfl_msg;
- pfm_end_msg_t pfm_end_msg;
- pfm_gen_msg_t pfm_gen_msg;
-} pfm_msg_t;
-
-/*
- * Define the version numbers for both perfmon as a whole and the sampling buffer format.
- */
-#define PFM_VERSION_MAJ 2U
-#define PFM_VERSION_MIN 0U
-#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
-#define PFM_VERSION_MAJOR(x) (((x)>>16) & 0xffff)
-#define PFM_VERSION_MINOR(x) ((x) & 0xffff)
-
-
-/*
- * miscellaneous architected definitions
- */
-#define PMU_FIRST_COUNTER 4 /* first counting monitor (PMC/PMD) */
-#define PMU_MAX_PMCS 256 /* maximum architected number of PMC registers */
-#define PMU_MAX_PMDS 256 /* maximum architected number of PMD registers */
-
-#ifdef __KERNEL__
-
-extern long perfmonctl(int fd, int cmd, void *arg, int narg);
-
-typedef struct {
- void (*handler)(int irq, void *arg, struct pt_regs *regs);
-} pfm_intr_handler_desc_t;
-
-extern void pfm_save_regs (struct task_struct *);
-extern void pfm_load_regs (struct task_struct *);
-
-extern void pfm_exit_thread(struct task_struct *);
-extern int pfm_use_debug_registers(struct task_struct *);
-extern int pfm_release_debug_registers(struct task_struct *);
-extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
-extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
-extern void pfm_init_percpu(void);
-extern void pfm_handle_work(void);
-extern int pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
-extern int pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
-
-
-
-/*
- * Reset PMD register flags
- */
-#define PFM_PMD_SHORT_RESET 0
-#define PFM_PMD_LONG_RESET 1
-
-typedef union {
- unsigned int val;
- struct {
- unsigned int notify_user:1; /* notify user program of overflow */
- unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */
- unsigned int block_task:1; /* block monitored task on kernel exit */
- unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */
- unsigned int reserved:28; /* for future use */
- } bits;
-} pfm_ovfl_ctrl_t;
-
-typedef struct {
- unsigned char ovfl_pmd; /* index of overflowed PMD */
- unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */
- unsigned short active_set; /* event set active at the time of the overflow */
- pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
-
-	unsigned long	pmd_last_reset;		/* last reset value of the PMD */
- unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */
- unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
- unsigned long pmd_value; /* current 64-bit value of the PMD */
- unsigned long pmd_eventid; /* eventid associated with PMD */
-} pfm_ovfl_arg_t;
-
-
-typedef struct {
- char *fmt_name;
- pfm_uuid_t fmt_uuid;
- size_t fmt_arg_size;
- unsigned long fmt_flags;
-
- int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
- int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
- int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
- int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
- int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
- int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
- int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
-
- struct list_head fmt_list;
-} pfm_buffer_fmt_t;
-
-extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt);
-extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);
-
-/*
- * perfmon interface exported to modules
- */
-extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
-extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
-extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
-extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
-
-/*
- * describe the content of the local_cpu_data->pfm_syst_info field
- */
-#define PFM_CPUINFO_SYST_WIDE 0x1 /* if set a system wide session exists */
-#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */
-#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */
-
-/*
- * sysctl control structure. visible to sampling formats
- */
-typedef struct {
- int debug; /* turn on/off debugging via syslog */
- int debug_ovfl; /* turn on/off debug printk in overflow handler */
-	int	fastctxsw;	/* turn on/off fast (insecure) ctxsw */
- int expert_mode; /* turn on/off value checking */
-} pfm_sysctl_t;
-extern pfm_sysctl_t pfm_sysctl;
-
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_IA64_PERFMON_H */
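
A hedged user-space sketch of the command flow this interface implies
(the perfmon-2.0 pattern): create a context, program a PMC/PMD pair,
attach to a task, then start. It assumes perfmonctl() is reachable as a
system call, <string.h> and <unistd.h> for memset()/getpid(), elides
error handling after context creation, and leaves the
PMU-model-specific event bits for reg_value at zero:

static int
example_self_session (void)
{
	pfarg_context_t ctx;
	pfarg_reg_t pc, pd;
	pfarg_load_t load;
	int fd;

	memset(&ctx, 0, sizeof(ctx));
	if (perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1) < 0)
		return -1;
	fd = ctx.ctx_fd;			/* context identifier */

	memset(&pc, 0, sizeof(pc));
	pc.reg_num   = PMU_FIRST_COUNTER;	/* PMC4 */
	pc.reg_value = 0;			/* event-selection bits go here */
	perfmonctl(fd, PFM_WRITE_PMCS, &pc, 1);

	memset(&pd, 0, sizeof(pd));
	pd.reg_num = PMU_FIRST_COUNTER;		/* the matching PMD4 */
	perfmonctl(fd, PFM_WRITE_PMDS, &pd, 1);

	memset(&load, 0, sizeof(load));
	load.load_pid = getpid();		/* attach to this task */
	perfmonctl(fd, PFM_LOAD_CONTEXT, &load, 1);
	perfmonctl(fd, PFM_START, NULL, 0);
	return fd;
}
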
diff --git a/xen/include/asm-ia64/linux-xen/asm/perfmon_default_smpl.h b/xen/include/asm-ia64/linux-xen/asm/perfmon_default_smpl.h
deleted file mode 100644
index 0d2c54fc50..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/perfmon_default_smpl.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * This file implements the default sampling buffer format
- * for the Linux/ia64 perfmon subsystem.
- */
-#ifndef __PERFMON_DEFAULT_SMPL_H__
-#define __PERFMON_DEFAULT_SMPL_H__ 1
-
-#define PFM_DEFAULT_SMPL_UUID { \
- 0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97}
-
-/*
- * format specific parameters (passed at context creation)
- */
-typedef struct {
- unsigned long buf_size; /* size of the buffer in bytes */
- unsigned int flags; /* buffer specific flags */
- unsigned int res1; /* for future use */
- unsigned long reserved[2]; /* for future use */
-} pfm_default_smpl_arg_t;
-
-/*
- * combined context+format specific structure. Can be passed
- * to PFM_CONTEXT_CREATE
- */
-typedef struct {
- pfarg_context_t ctx_arg;
- pfm_default_smpl_arg_t buf_arg;
-} pfm_default_smpl_ctx_arg_t;
-
-/*
- * This header is at the beginning of the sampling buffer returned to the user.
- * It is directly followed by the first record.
- */
-typedef struct {
- unsigned long hdr_count; /* how many valid entries */
- unsigned long hdr_cur_offs; /* current offset from top of buffer */
- unsigned long hdr_reserved2; /* reserved for future use */
-
- unsigned long hdr_overflows; /* how many times the buffer overflowed */
- unsigned long hdr_buf_size; /* how many bytes in the buffer */
-
- unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */
- unsigned int hdr_reserved1; /* for future use */
- unsigned long hdr_reserved[10]; /* for future use */
-} pfm_default_smpl_hdr_t;
-
-/*
- * Entry header in the sampling buffer. The header is directly followed
- * by the values of the PMD registers of interest, saved in increasing
- * index order: PMD4, PMD5, and so on. How many PMDs are present depends
- * on how the session was programmed.
- *
- * In the case where multiple counters overflow at the same time, multiple
- * entries are written consecutively.
- *
- * last_reset_value member indicates the initial value of the overflowed PMD.
- */
-typedef struct {
- int pid; /* thread id (for NPTL, this is gettid()) */
- unsigned char reserved1[3]; /* reserved for future use */
- unsigned char ovfl_pmd; /* index of overflowed PMD */
-
- unsigned long last_reset_val; /* initial value of overflowed PMD */
-	unsigned long	ip;		/* where the overflow interrupt happened */
- unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
-
-	unsigned short	cpu;		/* cpu on which the overflow occurred */
-	unsigned short	set;		/* event set active when the overflow occurred */
- int tgid; /* thread group id (for NPTL, this is getpid()) */
-} pfm_default_smpl_entry_t;
-
-#define PFM_DEFAULT_MAX_PMDS		64 /* how many PMDs the data structures can describe (8*sizeof(unsigned long)) */
-#define PFM_DEFAULT_MAX_ENTRY_SIZE (sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS))
-#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE (sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE)
-
-#define PFM_DEFAULT_SMPL_VERSION_MAJ 2U
-#define PFM_DEFAULT_SMPL_VERSION_MIN 0U
-#define PFM_DEFAULT_SMPL_VERSION (((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff))
-
-#endif /* __PERFMON_DEFAULT_SMPL_H__ */
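
A sketch of how a monitoring tool walks a buffer in this format: entries
follow the header back to back, each trailed by its recorded PMD values.
It assumes a fixed number of sampled PMDs per entry (true when every
overflow records the same smpl_pmds set) and treats region-7 addresses
(>= 0xe000000000000000) as kernel IPs:

static unsigned long
example_count_kernel_samples (pfm_default_smpl_hdr_t *hdr, unsigned int npmds)
{
	pfm_default_smpl_entry_t *ent = (pfm_default_smpl_entry_t *)(hdr + 1);
	unsigned long i, hits = 0;

	for (i = 0; i < hdr->hdr_count; i++) {
		unsigned long *pmds = (unsigned long *)(ent + 1);

		if (ent->ip >= 0xe000000000000000UL)
			hits++;
		/* the next entry starts after this entry's PMD values */
		ent = (pfm_default_smpl_entry_t *)(pmds + npmds);
	}
	return hits;
}
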
diff --git a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h
deleted file mode 100644
index 9adb3b7c7e..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h
+++ /dev/null
@@ -1,221 +0,0 @@
-#ifndef _ASM_IA64_PGALLOC_H
-#define _ASM_IA64_PGALLOC_H
-
-/*
- * This file contains the functions and defines necessary to allocate
- * page tables.
- *
- * This hopefully works with any (fixed) ia-64 page-size, as defined
- * in <asm/page.h> (currently 8192).
- *
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
- */
-
-#include <linux/config.h>
-
-#include <linux/compiler.h>
-#include <linux/mm.h>
-#include <linux/page-flags.h>
-#include <linux/threads.h>
-
-#include <asm/mmu_context.h>
-
-#ifndef XEN
-DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
-#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
-DECLARE_PER_CPU(long, __pgtable_quicklist_size);
-#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
-
-static inline long pgtable_quicklist_total_size(void)
-{
- long ql_size = 0;
- int cpuid;
-
- for_each_online_cpu(cpuid) {
- ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
- }
- return ql_size;
-}
-
-static inline void *pgtable_quicklist_alloc(void)
-{
- unsigned long *ret = NULL;
-
- preempt_disable();
-
- ret = pgtable_quicklist;
- if (likely(ret != NULL)) {
- pgtable_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- --pgtable_quicklist_size;
- preempt_enable();
- } else {
- preempt_enable();
- ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- }
-
- return ret;
-}
-
-static inline void pgtable_quicklist_free(void *pgtable_entry)
-{
-#ifdef CONFIG_NUMA
- unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
-
- if (unlikely(nid != numa_node_id())) {
- free_page((unsigned long)pgtable_entry);
- return;
- }
-#endif
-
- preempt_disable();
- *(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
- pgtable_quicklist = (unsigned long *)pgtable_entry;
- ++pgtable_quicklist_size;
- preempt_enable();
-}
-#endif
-
-#ifdef XEN
-#include <asm/pgtable.h>
-#ifdef __PAGETABLE_PUD_FOLDED
-# define pgd_cmpxchg_rel(mm, pgd, old_pud, new_pud) ({(void)old_pud;1;})
-#else
-# error "implement pgd_cmpxchg_rel()!"
-#endif
-#endif
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- return pgtable_quicklist_alloc();
-}
-
-#ifndef XEN
-static inline void pgd_free(pgd_t * pgd)
-{
- pgtable_quicklist_free(pgd);
-}
-#else
-static inline void pgd_free(volatile pgd_t * pgd)
-{
- pgtable_quicklist_free((void*)pgd);
-}
-#endif
-
-static inline void
-pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
-{
- pud_val(*pud_entry) = __pa(pmd);
-}
-
-#ifdef XEN
-static inline int
-pud_cmpxchg_rel(struct mm_struct *mm, volatile pud_t * pud_entry,
- pmd_t * old_pmd, volatile pmd_t * new_pmd)
-{
-#ifdef CONFIG_SMP
- unsigned long r;
- r = cmpxchg_rel(&pud_val(*pud_entry), __pa(old_pmd), __pa(new_pmd));
- return (r == __pa(old_pmd));
-#else
- if (pud_val(*pud_entry) == __pa(old_pmd)) {
- pud_val(*pud_entry) = __pa(new_pmd);
- return 1;
- }
- return 0;
-#endif
-}
-#endif
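
A hedged sketch of the lock-free populate pattern pud_cmpxchg_rel()
enables, borrowing pud_none() and pmd_offset() from asm/pgtable.h and
pmd_alloc_one()/pmd_free() from below: racing CPUs each allocate a pmd
page, one wins the release-cmpxchg against the empty (zero) entry, and
the losers free their copy instead of taking a page-table lock:

static volatile pmd_t *
example_pmd_populate (struct mm_struct *mm, volatile pud_t *pud,
                      unsigned long addr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (pmd != NULL && !pud_cmpxchg_rel(mm, pud, NULL, pmd))
			pmd_free(pmd);		/* another CPU won the race */
	}
	if (pud_none(*pud))
		return NULL;			/* allocation failed */
	return pmd_offset(pud, addr);
}
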
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- return pgtable_quicklist_alloc();
-}
-
-#ifndef XEN
-static inline void pmd_free(pmd_t * pmd)
-{
- pgtable_quicklist_free(pmd);
-}
-#else
-static inline void pmd_free(volatile pmd_t * pmd)
-{
- pgtable_quicklist_free((void*)pmd);
-}
-#endif
-
-#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
-
-#ifndef XEN
-static inline void
-pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
-{
- pmd_val(*pmd_entry) = page_to_maddr(pte);
-}
-#endif
-
-static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
-{
- pmd_val(*pmd_entry) = __pa(pte);
-}
-
-#ifdef XEN
-static inline int
-pmd_cmpxchg_kernel_rel(struct mm_struct *mm, volatile pmd_t * pmd_entry,
- pte_t * old_pte, pte_t * new_pte)
-{
-#ifdef CONFIG_SMP
- unsigned long r;
- r = cmpxchg_rel(&pmd_val(*pmd_entry), __pa(old_pte), __pa(new_pte));
- return (r == __pa(old_pte));
-#else
- if (pmd_val(*pmd_entry) == __pa(old_pte)) {
- pmd_val(*pmd_entry) = __pa(new_pte);
- return 1;
- }
- return 0;
-#endif
-}
-#endif
-
-#ifndef XEN
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long addr)
-{
- return virt_to_page(pgtable_quicklist_alloc());
-}
-#endif
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long addr)
-{
- return pgtable_quicklist_alloc();
-}
-
-#ifndef XEN
-static inline void pte_free(struct page *pte)
-{
- pgtable_quicklist_free(page_address(pte));
-}
-
-static inline void pte_free_kernel(pte_t * pte)
-{
- pgtable_quicklist_free(pte);
-}
-#else
-static inline void pte_free_kernel(volatile pte_t * pte)
-{
- pgtable_quicklist_free((void*)pte);
-}
-#endif
-
-#ifndef XEN
-#define __pte_free_tlb(tlb, pte) pte_free(pte)
-#endif
-
-extern void check_pgt_cache(void);
-
-#endif /* _ASM_IA64_PGALLOC_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/pgtable.h b/xen/include/asm-ia64/linux-xen/asm/pgtable.h
deleted file mode 100644
index adfd12a3bd..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h
+++ /dev/null
@@ -1,694 +0,0 @@
-#ifndef _ASM_IA64_PGTABLE_H
-#define _ASM_IA64_PGTABLE_H
-
-/*
- * This file contains the functions and defines necessary to modify and use
- * the IA-64 page table tree.
- *
- * This hopefully works with any (fixed) IA-64 page-size, as defined
- * in <asm/page.h>.
- *
- * Copyright (C) 1998-2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/config.h>
-
-#include <asm/mman.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/types.h>
-#ifdef XEN
-#include <asm/xenpage.h>
-#ifndef __ASSEMBLY__
-#include <xen/sched.h> /* needed for mm_struct (via asm/domain.h) */
-#endif
-#endif
-
-#ifndef XEN
-#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */
-#endif
-
-/*
- * First, define the various bits in a PTE. Note that the PTE format
- * matches the VHPT short format, the first doubleword of the VHPT long
- * format, and the first doubleword of the TLB insertion format.
- */
-#define _PAGE_P_BIT 0
-#define _PAGE_A_BIT 5
-#define _PAGE_D_BIT 6
-
-#define _PAGE_P (1 << _PAGE_P_BIT) /* page present bit */
-#define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */
-#ifdef XEN
-#define _PAGE_RV1_BIT 1
-#define _PAGE_RV2_BIT 50
-#define _PAGE_RV1 (__IA64_UL(1) << _PAGE_RV1_BIT) /* reserved bit */
-#define _PAGE_RV2 (__IA64_UL(3) << _PAGE_RV2_BIT) /* reserved bits */
-
-#define _PAGE_MA_ST (0x1 << 2) /* is reserved for software use */
-#endif
-#define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */
-#define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */
-#define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */
-#define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */
-#define _PAGE_MA_MASK (0x7 << 2)
-#define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */
-#define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */
-#define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */
-#define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */
-#define _PAGE_PL_MASK (3 << 7)
-#define _PAGE_AR_R (0 << 9) /* read only */
-#define _PAGE_AR_RX (1 << 9) /* read & execute */
-#define _PAGE_AR_RW (2 << 9) /* read & write */
-#define _PAGE_AR_RWX (3 << 9) /* read, write & execute */
-#define _PAGE_AR_R_RW (4 << 9) /* read / read & write */
-#define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */
-#define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */
-#define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */
-#define _PAGE_AR_MASK (7 << 9)
-#define _PAGE_AR_SHIFT 9
-#define _PAGE_A (1 << _PAGE_A_BIT) /* page accessed bit */
-#define _PAGE_D (1 << _PAGE_D_BIT) /* page dirty bit */
-#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
-#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
-#ifdef XEN
-#define _PAGE_VIRT_D (__IA64_UL(1) << 53) /* Virtual dirty bit */
-#define _PAGE_PROTNONE 0
-#define _PAGE_PL_PRIV (CONFIG_CPL0_EMUL << 7)
-
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
-#define _PAGE_TLB_TRACKING_BIT 54
-#define _PAGE_TLB_INSERTED_BIT 55
-#define _PAGE_TLB_INSERTED_MANY_BIT 56
-
-#define _PAGE_TLB_TRACKING (1UL << _PAGE_TLB_TRACKING_BIT)
-#define _PAGE_TLB_INSERTED (1UL << _PAGE_TLB_INSERTED_BIT)
-#define _PAGE_TLB_INSERTED_MANY (1UL << _PAGE_TLB_INSERTED_MANY_BIT)
-#define _PAGE_TLB_TRACK_MASK (_PAGE_TLB_TRACKING | \
- _PAGE_TLB_INSERTED | \
- _PAGE_TLB_INSERTED_MANY)
-
-#define pte_tlb_tracking(pte) \
- ((pte_val(pte) & _PAGE_TLB_TRACKING) != 0)
-#define pte_tlb_inserted(pte) \
- ((pte_val(pte) & _PAGE_TLB_INSERTED) != 0)
-#define pte_tlb_inserted_many(pte) \
- ((pte_val(pte) & _PAGE_TLB_INSERTED_MANY) != 0)
-#endif // CONFIG_XEN_IA64_TLB_TRACK
-
-#define _PAGE_PGC_ALLOCATED_BIT 59 /* _PGC_allocated */
-#define _PAGE_PGC_ALLOCATED (__IA64_UL(1) << _PAGE_PGC_ALLOCATED_BIT)
-
-#define _PAGE_IO_BIT 60
-#define _PAGE_IO (__IA64_UL(1) << _PAGE_IO_BIT)
-
-#else
-#define _PAGE_PROTNONE (__IA64_UL(1) << 63)
-#endif
-
-/* Valid only for a PTE with the present bit cleared: */
-#define _PAGE_FILE (1 << 1) /* see swap & file pte remarks below */
-
-#define _PFN_MASK _PAGE_PPN_MASK
-/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
-#define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)
-
-#define _PAGE_SIZE_4K 12
-#define _PAGE_SIZE_8K 13
-#define _PAGE_SIZE_16K 14
-#define _PAGE_SIZE_64K 16
-#define _PAGE_SIZE_256K 18
-#define _PAGE_SIZE_1M 20
-#define _PAGE_SIZE_4M 22
-#define _PAGE_SIZE_16M 24
-#define _PAGE_SIZE_64M 26
-#define _PAGE_SIZE_256M 28
-#define _PAGE_SIZE_1G 30
-#define _PAGE_SIZE_4G 32
-
-#define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
-#define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
-#define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED
-
-/*
- * Definitions for first level:
- *
- * PGDIR_SHIFT determines what a first-level page table entry can map.
- */
-#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
-#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
-#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
-#define FIRST_USER_ADDRESS 0
-
-/*
- * Definitions for second level:
- *
- * PMD_SHIFT determines the size of the area a second-level page table
- * can map.
- */
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
-
-/*
- * Definitions for third level:
- */
-#define PTRS_PER_PTE (__IA64_UL(1) << (PAGE_SHIFT-3))
-
-/*
- * All the normal masks have the "page accessed" bits on, as any time
- * they are used, the page is accessed. They are cleared only by the
- * page-out routines.
- */
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_A)
-#define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
-#define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
-#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
-#define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
-#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
-#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
-
-# ifndef __ASSEMBLY__
-
-#include <asm/bitops.h>
-#include <asm/cacheflush.h>
-#include <asm/mmu_context.h>
-#include <asm/processor.h>
-
-/*
- * Next come the mappings that determine how mmap() protection bits
- * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented. The
- * _P version gets used for a private shared memory segment, the _S
- * version gets used for a shared memory segment with MAP_SHARED on.
- * In a private shared memory segment, we do a copy-on-write if a task
- * attempts to write to the page.
- */
- /* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
-#define __P011 PAGE_READONLY /* ditto */
-#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
-#define __S011 PAGE_SHARED
-#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-
-#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
-#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
-
-
-/*
- * Some definitions to translate between mem_map, PTEs, and page addresses:
- */
-
-
-/* Quick test to see if ADDR is a (potentially) valid physical address. */
-static inline long
-ia64_phys_addr_valid (unsigned long addr)
-{
- return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
-}
-
-/*
- * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
- * memory. For the return value to be meaningful, ADDR must be >=
- * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
- * require a hash-, or multi-level tree-lookup or something of that
- * sort) but it guarantees to return TRUE only if accessing the page
- * at that address does not cause an error. Note that there may be
- * addresses for which kern_addr_valid() returns FALSE even though an
- * access would not cause an error (e.g., this is typically true for
- * memory-mapped I/O regions).
- *
- * XXX Need to implement this for IA-64.
- */
-#define kern_addr_valid(addr) (1)
-
-
-/*
- * Now come the defines and routines to manage and access the three-level
- * page table.
- */
-
-/*
- * On some architectures, special things need to be done when setting
- * the PTE in a page table. Nothing special needs to be done on IA-64.
- */
-#define set_pte(ptep, pteval) (*(ptep) = (pteval))
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-#ifdef XEN
-static inline void
-set_pte_rel(volatile pte_t* ptep, pte_t pteval)
-{
-#if CONFIG_SMP
- asm volatile ("st8.rel [%0]=%1" ::
- "r"(&pte_val(*ptep)), "r"(pte_val(pteval)) :
- "memory");
-#else
- set_pte(ptep, pteval);
-#endif
-}
-#endif
-
-#define RGN_SIZE (1UL << 61)
-#define RGN_KERNEL 7
-
-#define VMALLOC_START 0xa000000200000000UL
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-# define VMALLOC_END_INIT (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END vmalloc_end
- extern unsigned long vmalloc_end;
-#else
-# define VMALLOC_END (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
-#endif
-
-/* fs/proc/kcore.c */
-#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
-#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)
-
-/*
- * Conversion functions: convert page frame number (pfn) and a protection value to a page
- * table entry (pte).
- */
-#define pfn_pte(pfn, pgprot) \
-({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
-
-/* Extract pfn from pte. */
-#define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)
-
-#define mk_pte(page, pgprot) pfn_pte(page_to_mfn(page), (pgprot))
-
-/* This takes a physical page address that is used by the remapping functions */
-#define mk_pte_phys(physpage, pgprot) \
-({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
-
-#define pte_modify(_pte, newprot) \
- (__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
-
-#define page_pte_prot(page,prot) mk_pte(page, prot)
-#define page_pte(page) page_pte_prot(page, __pgprot(0))
-
-#define pte_none(pte) (!pte_val(pte))
-#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL)
-/* pte_page() returns the "struct page *" corresponding to the PTE: */
-#define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))
-
-#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
-#define pmd_present(pmd) (pmd_val(pmd) != 0UL)
-#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
-#define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
-
-#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud)))
-#define pud_present(pud) (pud_val(pud) != 0UL)
-#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
-
-#define pud_page(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
-
-/*
- * The following have defined behavior only if pte_present() is true.
- */
-#define pte_user(pte) ((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
-#define pte_read(pte) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
-#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
-#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
-#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
-#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
-#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
-#ifdef XEN
-#define pte_pgc_allocated(pte) ((pte_val(pte) & _PAGE_PGC_ALLOCATED) != 0)
-#define pte_mem(pte) (!(pte_val(pte) & _PAGE_IO) && !pte_none(pte))
-#endif
-/*
- * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
- * access rights:
- */
-#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
-#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
-#define pte_mkexec(pte) (__pte(pte_val(pte) | _PAGE_AR_RX))
-#define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
-#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
-#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
-#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
-#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_P))
-
-/*
- * Macro to mark a page protection value as "uncacheable". Note that "protection" is really a
- * misnomer here as the protection value contains the memory attribute bits, dirty bits,
- * and various other bits as well.
- */
-#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
-
-/*
- * Macro to mark a page protection value as "write-combining".
- * Note that "protection" is really a misnomer here as the protection
- * value contains the memory attribute bits, dirty bits, and various
- * other bits as well. Accesses through a write-combining translation
- * bypass the caches, but do allow consecutive writes to be
- * combined into single (but larger) write transactions.
- */
-#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
-
-static inline unsigned long
-pgd_index (unsigned long address)
-{
- unsigned long region = address >> 61;
- unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
-
- return (region << (PAGE_SHIFT - 6)) | l1index;
-}
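-
-/*
- * Worked example (illustrative, assuming 8KB pages, so PAGE_SHIFT == 13,
- * PGDIR_SHIFT == 33 and PTRS_PER_PGD == 1024): the pgd is split into
- * eight chunks of PTRS_PER_PGD >> 3 == 128 entries, one chunk per
- * region.  For a region-5 address such as 0xa000000000000000,
- * region == 5 and l1index == 0, so pgd_index() returns (5 << 7) | 0 == 640.
- */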
-
-/* The offset in the 1-level directory is given by the 3 region bits
- (61..63) and the level-1 bits. */
-#ifndef XEN
-static inline pgd_t*
-#else
-static inline volatile pgd_t*
-#endif
-pgd_offset (struct mm_struct *mm, unsigned long address)
-{
- return mm->pgd + pgd_index(address);
-}
-
-/* In the kernel's mapped region we completely ignore the region number
- (since we know it's in region number 5). */
-#define pgd_offset_k(addr) \
- (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
-
-/* Look up a pgd entry in the gate area. On IA-64, the gate-area
- resides in the kernel-mapped segment, hence we use pgd_offset_k()
- here. */
-#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
-
-/* Find an entry in the second-level page table.. */
-#ifndef XEN
-#define pmd_offset(dir,addr) \
- ((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-#else
-#define pmd_offset(dir,addr) \
- ((volatile pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-#endif
-
-/*
- * Find an entry in the third-level page table. This looks more complicated than it
- * should be because some platforms place page tables in high memory.
- */
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#ifndef XEN
-#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
-#else
-#define pte_offset_kernel(dir,addr) ((volatile pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
-#endif
-#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr)
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-#ifndef XEN
-/* atomic versions of some of the PTE manipulations: */
-
-static inline int
-ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-#ifdef CONFIG_SMP
- if (!pte_young(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_A_BIT, ptep);
-#else
- pte_t pte = *ptep;
- if (!pte_young(pte))
- return 0;
- set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
- return 1;
-#endif
-}
-
-static inline int
-ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-#ifdef CONFIG_SMP
- if (!pte_dirty(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_D_BIT, ptep);
-#else
- pte_t pte = *ptep;
- if (!pte_dirty(pte))
- return 0;
- set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
- return 1;
-#endif
-}
-#endif
-
-#ifdef XEN
-static inline pte_t
-ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- volatile pte_t *ptep)
-#else
-static inline pte_t
-ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-#endif
-{
-#ifdef CONFIG_SMP
- return __pte(xchg((long *) ptep, 0));
-#else
- pte_t pte = *ptep;
- pte_clear(mm, addr, ptep);
- return pte;
-#endif
-}
-
-#ifdef XEN
-static inline pte_t
-ptep_xchg(struct mm_struct *mm, unsigned long addr,
- volatile pte_t *ptep, pte_t npte)
-{
-#ifdef CONFIG_SMP
- return __pte(xchg((long *) ptep, pte_val(npte)));
-#else
- pte_t pte = *ptep;
- set_pte (ptep, npte);
- return pte;
-#endif
-}
-
-static inline pte_t
-ptep_cmpxchg_rel(struct mm_struct *mm, unsigned long addr,
- volatile pte_t *ptep, pte_t old_pte, pte_t new_pte)
-{
-#ifdef CONFIG_SMP
- return __pte(cmpxchg_rel(&pte_val(*ptep),
- pte_val(old_pte), pte_val(new_pte)));
-#else
- pte_t pte = *ptep;
- if (pte_val(pte) == pte_val(old_pte)) {
- set_pte(ptep, new_pte);
- }
- return pte;
-#endif
-}
-#endif
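-
-/*
- * Typical caller pattern for ptep_cmpxchg_rel() (an illustrative
- * sketch, not code from this file): read the pte, build the update,
- * and retry when another CPU changed the entry in between:
- *
- *	pte_t old = *ptep;
- *	pte_t ret = ptep_cmpxchg_rel(mm, addr, ptep, old, new_pte);
- *	if (pte_val(ret) != pte_val(old))
- *		goto again;	... lost the race: re-read and retry
- */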
-
-#ifndef XEN
-static inline void
-ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-#ifdef CONFIG_SMP
- unsigned long new, old;
-
- do {
- old = pte_val(*ptep);
- new = pte_val(pte_wrprotect(__pte (old)));
- } while (cmpxchg((unsigned long *) ptep, old, new) != old);
-#else
- pte_t old_pte = *ptep;
- set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-#endif
-}
-
-static inline int
-pte_same (pte_t a, pte_t b)
-{
- return pte_val(a) == pte_val(b);
-}
-
-#define update_mmu_cache(vma, address, pte) do { } while (0)
-#endif /* XEN */
-
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void paging_init (void);
-
-/*
- * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
- * bits in the swap-type field of the swap pte. It would be nice to
- * enforce that, but we can't easily include <linux/swap.h> here.
- * (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
- *
- * Format of swap pte:
- * bit 0 : present bit (must be zero)
- * bit 1 : _PAGE_FILE (must be zero)
- * bits 2- 8: swap-type
- * bits 9-62: swap offset
- * bit 63 : _PAGE_PROTNONE bit
- *
- * Format of file pte:
- * bit 0 : present bit (must be zero)
- * bit 1 : _PAGE_FILE (must be one)
- * bits 2-62: file_offset/PAGE_SIZE
- * bit 63 : _PAGE_PROTNONE bit
- */
-#define __swp_type(entry) (((entry).val >> 2) & 0x7f)
-#define __swp_offset(entry) (((entry).val << 1) >> 10)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-#define PTE_FILE_MAX_BITS 61
-#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
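-
-/*
- * Worked example (illustrative): __swp_entry(3, 0x1000) builds the value
- * (3 << 2) | (0x1000L << 9) == 0x20000c.  __swp_type() recovers 3 from
- * bits 2-8, and __swp_offset() recovers 0x1000 by shifting left once to
- * drop the _PAGE_PROTNONE bit (bit 63) and then right by 10.
- */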
-
-/* XXX is this right? */
-#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
- remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc.
- */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#ifndef XEN
-extern struct page *zero_page_memmap_ptr;
-#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
-#endif
-
-/* We provide our own get_unmapped_area to cope with VA holes for userland */
-#define HAVE_ARCH_UNMAPPED_AREA
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
-#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
-#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
-struct mmu_gather;
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
- unsigned long end, unsigned long floor, unsigned long ceiling);
-#endif
-
-/*
- * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
- * information. However, we use this routine to take care of any (delayed) i-cache
- * flushing that may be necessary.
- */
-extern void lazy_mmu_prot_update (pte_t pte);
-
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-/*
- * Update PTEP with ENTRY, which is guaranteed to be a less
- * restrictive PTE. That is, ENTRY may have the ACCESSED, DIRTY, and
- * WRITABLE bits turned on where the value at PTEP did not.  The
- * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
- *
- * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
- * having to worry about races. On SMP machines, there are only two
- * cases where this is true:
- *
- * (1) *PTEP has the PRESENT bit turned OFF
- * (2) ENTRY has the DIRTY bit turned ON
- *
- * On ia64, we could implement this routine with a cmpxchg()-loop
- * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
- * However, like on x86, we can get a more streamlined version by
- * observing that it is OK to drop ACCESSED bit updates when
- * SAFELY_WRITABLE is FALSE. Besides being rare, all that would do is
- * result in an extra Access-bit fault, which would then turn on the
- * ACCESSED bit in the low-level fault handler (iaccess_bit or
- * daccess_bit in ivt.S).
- */
-#ifdef CONFIG_SMP
-# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
-do { \
- if (__safely_writable) { \
- set_pte(__ptep, __entry); \
- flush_tlb_page(__vma, __addr); \
- } \
-} while (0)
-#else
-# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
- ptep_establish(__vma, __addr, __ptep, __entry)
-#endif
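-
-/*
- * For comparison, a minimal sketch of the cmpxchg()-loop alternative
- * mentioned above, which ORs in the _PAGE_A/_PAGE_D bits instead of
- * dropping the update (illustrative only, not the implementation
- * chosen here):
- *
- *	unsigned long old, new;
- *	do {
- *		old = pte_val(*__ptep);
- *		new = old | (pte_val(__entry) & (_PAGE_A | _PAGE_D));
- *	} while (cmpxchg((unsigned long *) __ptep, old, new) != old);
- */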
-
-# ifdef CONFIG_VIRTUAL_MEM_MAP
- /* arch mem_map init routine is needed due to holes in a virtual mem_map */
-# define __HAVE_ARCH_MEMMAP_INIT
- extern void memmap_init (unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn);
-# endif /* CONFIG_VIRTUAL_MEM_MAP */
-# endif /* !__ASSEMBLY__ */
-
-/*
- * Identity-mapped regions use a large page size. We'll call such large pages
- * "granules". If you can think of a better name that's unambiguous, let me
- * know...
- */
-#if defined(CONFIG_IA64_GRANULE_64MB)
-# define IA64_GRANULE_SHIFT _PAGE_SIZE_64M
-#elif defined(CONFIG_IA64_GRANULE_16MB)
-# define IA64_GRANULE_SHIFT _PAGE_SIZE_16M
-#endif
-#define IA64_GRANULE_SIZE (1 << IA64_GRANULE_SHIFT)
-/*
- * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
- */
-#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
-#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)
-
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-/* These tell get_user_pages() that the first gate page is accessible from user-level. */
-#define FIXADDR_USER_START GATE_ADDR
-#ifdef HAVE_BUGGY_SEGREL
-# define FIXADDR_USER_END (GATE_ADDR + 2*PAGE_SIZE)
-#else
-# define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE)
-#endif
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
-#define __HAVE_ARCH_PGD_OFFSET_GATE
-#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
-
-#include <asm-generic/pgtable-nopud.h>
-#include <asm-generic/pgtable.h>
-
-#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/processor.h b/xen/include/asm-ia64/linux-xen/asm/processor.h
deleted file mode 100644
index f81547ba31..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/processor.h
+++ /dev/null
@@ -1,795 +0,0 @@
-#ifndef _ASM_IA64_PROCESSOR_H
-#define _ASM_IA64_PROCESSOR_H
-
-/*
- * Copyright (C) 1998-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- *
- * 11/24/98 S.Eranian added ia64_set_iva()
- * 12/03/99 D. Mosberger implement thread_saved_pc() via kernel unwind API
- * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 support
- */
-
-#include <linux/config.h>
-
-#include <asm/intrinsics.h>
-#include <asm/kregs.h>
-#if !defined(XEN)
-#include <asm/ptrace.h>
-#elif !defined(__ASSEMBLY__)
-struct cpu_user_regs;
-#define pt_regs cpu_user_regs
-#endif
-#include <asm/ustack.h>
-
-/* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
-#define ARCH_HAS_SCHED_DOMAIN
-
-#define IA64_NUM_DBG_REGS 8
-/*
- * Limits for PMC and PMD are set to less than maximum architected values
- * but should be sufficient for a while
- */
-#ifdef XEN
-/*
- * These are increased in linux-2.6.16. Montecito requires 35 PMDs.
- * This ifdef will become unnecessary when this header file is
- * upgraded to 2.6.16 or newer.
- */
-#define IA64_NUM_PMC_REGS 64
-#define IA64_NUM_PMD_REGS 64
-#else
-#define IA64_NUM_PMC_REGS 32
-#define IA64_NUM_PMD_REGS 32
-#endif
-
-#define DEFAULT_MAP_BASE __IA64_UL_CONST(0x2000000000000000)
-#define DEFAULT_TASK_SIZE __IA64_UL_CONST(0xa000000000000000)
-
-/*
- * TASK_SIZE is really a misnomer: it is actually the maximum user
- * space address (plus one). On IA-64, there are five regions of 2TB
- * each (assuming 8KB page size), for a total of 8TB of user virtual
- * address space.
- */
-#define TASK_SIZE (current->thread.task_size)
-
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE (current->thread.map_base)
-
-#define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */
-#define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */
-#define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */
-#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */
-#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */
- /* bit 5 is currently unused */
-#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
-#define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
-
-#define IA64_THREAD_UAC_SHIFT 3
-#define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
-#define IA64_THREAD_FPEMU_SHIFT 6
-#define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
-
-
-/*
- * This shift should be large enough to be able to represent 1000000000/itc_freq with good
- * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
- * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
- */
-#define IA64_NSEC_PER_CYC_SHIFT 30
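-
-/*
- * Illustrative arithmetic: with a 1GHz ITC, the scaled value
- * (1000000000 << IA64_NSEC_PER_CYC_SHIFT) / itc_freq is exactly
- * 1 << 30, so the intended conversion (cycles * scale) >> 30 maps CYC
- * cycles to CYC nanoseconds, as expected for a 1ns cycle time.
- */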
-
-#ifndef __ASSEMBLY__
-
-#include <linux/cache.h>
-#include <linux/compiler.h>
-#include <linux/threads.h>
-#include <linux/types.h>
-
-#include <asm/fpu.h>
-#include <asm/page.h>
-#include <asm/percpu.h>
-#include <asm/rse.h>
-#include <asm/unwind.h>
-#include <asm/atomic.h>
-#ifdef CONFIG_NUMA
-#include <asm/nodedata.h>
-#endif
-
-#ifdef XEN
-#include <asm/xenprocessor.h>
-#include <xen/bitops.h>
-#else
-/* The processor status register (PSR) bits, expressed as bitfields for more efficient access: */
-struct ia64_psr {
- __u64 reserved0 : 1;
- __u64 be : 1;
- __u64 up : 1;
- __u64 ac : 1;
- __u64 mfl : 1;
- __u64 mfh : 1;
- __u64 reserved1 : 7;
- __u64 ic : 1;
- __u64 i : 1;
- __u64 pk : 1;
- __u64 reserved2 : 1;
- __u64 dt : 1;
- __u64 dfl : 1;
- __u64 dfh : 1;
- __u64 sp : 1;
- __u64 pp : 1;
- __u64 di : 1;
- __u64 si : 1;
- __u64 db : 1;
- __u64 lp : 1;
- __u64 tb : 1;
- __u64 rt : 1;
- __u64 reserved3 : 4;
- __u64 cpl : 2;
- __u64 is : 1;
- __u64 mc : 1;
- __u64 it : 1;
- __u64 id : 1;
- __u64 da : 1;
- __u64 dd : 1;
- __u64 ss : 1;
- __u64 ri : 2;
- __u64 ed : 1;
- __u64 bn : 1;
- __u64 reserved4 : 19;
-};
-#endif
-
-/*
- * CPU type, hardware bug flags, and per-CPU state. Frequently used
- * state comes earlier:
- */
-struct cpuinfo_ia64 {
- __u32 softirq_pending;
- __u64 itm_delta; /* # of clock cycles between clock ticks */
- __u64 itm_next; /* interval timer mask value to use for next clock tick */
- __u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
- __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
- __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
- __u64 itc_freq; /* frequency of ITC counter */
- __u64 proc_freq; /* frequency of processor */
- __u64 cyc_per_usec; /* itc_freq/1000000 */
- __u64 ptce_base;
- __u32 ptce_count[2];
- __u32 ptce_stride[2];
- struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */
-
-#ifdef CONFIG_SMP
- __u64 loops_per_jiffy;
- int cpu;
- __u32 socket_id; /* physical processor socket id */
- __u16 core_id; /* core id */
- __u16 thread_id; /* thread id */
- __u16 num_log; /* Total number of logical processors on
- * this socket that were successfully booted */
- __u8 cores_per_socket; /* Cores per processor socket */
- __u8 threads_per_core; /* Threads per core */
-#endif
-
- /* CPUID-derived information: */
- __u64 ppn;
- __u64 features;
- __u8 number;
- __u8 revision;
- __u8 model;
- __u8 family;
- __u8 archrev;
- char vendor[16];
-
-#ifdef CONFIG_NUMA
- struct ia64_node_data *node_data;
-#endif
-};
-
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
-DECLARE_PER_CPU(int, cpu_state);
-
-/*
- * The "local" data variable. It refers to the per-CPU data of the currently executing
- * CPU, much like "current" points to the per-task data of the currently executing task.
- * Do not use the address of local_cpu_data, since it will be different from
- * cpu_data(smp_processor_id())!
- */
-#define local_cpu_data (&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
-
-#ifdef CONFIG_SMP
-#define cpu_to_core(cpu) (cpu_data(cpu)->core_id)
-#define cpu_to_socket(cpu) (cpu_data(cpu)->socket_id)
-#else
-#define cpu_to_core(cpu) 0
-#define cpu_to_socket(cpu) 0
-#endif
-
-extern void identify_cpu (struct cpuinfo_ia64 *);
-extern void print_cpu_info (struct cpuinfo_ia64 *);
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-#define SET_UNALIGN_CTL(task,value) \
-({ \
- (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \
- | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \
- 0; \
-})
-#define GET_UNALIGN_CTL(task,addr) \
-({ \
- put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \
- (int __user *) (addr)); \
-})
-
-#define SET_FPEMU_CTL(task,value) \
-({ \
- (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK) \
- | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \
- 0; \
-})
-#define GET_FPEMU_CTL(task,addr) \
-({ \
- put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \
- (int __user *) (addr)); \
-})
-
-#ifdef CONFIG_IA32_SUPPORT
-struct desc_struct {
- unsigned int a, b;
-};
-
-#define desc_empty(desc) (!((desc)->a + (desc)->b))
-#define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-
-#define GDT_ENTRY_TLS_ENTRIES 3
-#define GDT_ENTRY_TLS_MIN 6
-#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
-
-#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
-
-struct partial_page_list;
-#endif
-
-struct thread_struct {
- __u32 flags; /* various thread flags (see IA64_THREAD_*) */
- /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
- __u8 on_ustack; /* executing on user-stacks? */
- __u8 pad[3];
- __u64 ksp; /* kernel stack pointer */
- __u64 map_base; /* base address for get_unmapped_area() */
- __u64 task_size; /* limit for task size */
- __u64 rbs_bot; /* the base address for the RBS */
- int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */
-
-#ifdef CONFIG_IA32_SUPPORT
- __u64 eflag; /* IA32 EFLAGS reg */
- __u64 fsr; /* IA32 floating pt status reg */
- __u64 fcr; /* IA32 floating pt control reg */
- __u64 fir; /* IA32 fp except. instr. reg */
- __u64 fdr; /* IA32 fp except. data reg */
- __u64 old_k1; /* old value of ar.k1 */
- __u64 old_iob; /* old IOBase value */
- struct partial_page_list *ppl; /* partial page list for 4K page size issue */
- /* cached TLS descriptors. */
- struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-
-# define INIT_THREAD_IA32 .eflag = 0, \
- .fsr = 0, \
- .fcr = 0x17800000037fULL, \
- .fir = 0, \
- .fdr = 0, \
- .old_k1 = 0, \
- .old_iob = 0, \
- .ppl = NULL,
-#else
-# define INIT_THREAD_IA32
-#endif /* CONFIG_IA32_SUPPORT */
-#ifdef CONFIG_PERFMON
- __u64 pmcs[IA64_NUM_PMC_REGS];
- __u64 pmds[IA64_NUM_PMD_REGS];
- void *pfm_context; /* pointer to detailed PMU context */
- unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
-# define INIT_THREAD_PM .pmcs = {0UL, }, \
- .pmds = {0UL, }, \
- .pfm_context = NULL, \
- .pfm_needs_checking = 0UL,
-#else
-# define INIT_THREAD_PM
-#endif
-#ifndef XEN
- __u64 dbr[IA64_NUM_DBG_REGS];
- __u64 ibr[IA64_NUM_DBG_REGS];
-#endif
- struct ia64_fpreg fph[96]; /* saved/loaded on demand */
-};
-
-#ifndef XEN
-#define INIT_THREAD { \
- .flags = 0, \
- .on_ustack = 0, \
- .ksp = 0, \
- .map_base = DEFAULT_MAP_BASE, \
- .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \
- .task_size = DEFAULT_TASK_SIZE, \
- .last_fph_cpu = -1, \
- INIT_THREAD_IA32 \
- INIT_THREAD_PM \
- .dbr = {0, }, \
- .ibr = {0, }, \
- .fph = {{{{0}}}, } \
-}
-
-#define start_thread(regs,new_ip,new_sp) do { \
- set_fs(USER_DS); \
- regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL)) \
- & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \
- regs->cr_iip = new_ip; \
- regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \
- regs->ar_rnat = 0; \
- regs->ar_bspstore = current->thread.rbs_bot; \
- regs->ar_fpsr = FPSR_DEFAULT; \
- regs->loadrs = 0; \
- regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */ \
- regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
- if (unlikely(!current->mm->dumpable)) { \
- /* \
- * Zap scratch regs to avoid leaking bits between processes with different \
- * uid/privileges. \
- */ \
- regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0; \
- regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \
- } \
-} while (0)
-#endif
-
-/* Forward declarations, a strange C thing... */
-struct mm_struct;
-struct task_struct;
-
-/*
- * Free all resources held by a thread. This is called after the
- * parent of DEAD_TASK has collected the exit status of the task via
- * wait().
- */
-#define release_thread(dead_task)
-
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
-
-/*
- * This is the mechanism for creating a new kernel thread.
- *
- * NOTE 1: Only a kernel-only process (i.e. the swapper or direct
- * descendants who haven't done an "execve()") should use this: it
- * will work within a system call from a "real" process, but the
- * process memory space will not be free'd until both the parent and
- * the child have exited.
- *
- * NOTE 2: This MUST NOT be an inlined function. Otherwise, we get
- * into trouble in init/main.c when the child thread returns to
- * do_basic_setup() and the timing is such that free_initmem() has
- * been called already.
- */
-extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
-
-/* Get wait channel for task P. */
-extern unsigned long get_wchan (struct task_struct *p);
-
-/* Return instruction pointer of blocked task TSK. */
-#define KSTK_EIP(tsk) \
- ({ \
- struct pt_regs *_regs = ia64_task_regs(tsk); \
- _regs->cr_iip + ia64_psr(_regs)->ri; \
- })
-
-/* Return stack pointer of blocked task TSK. */
-#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
-
-extern void ia64_getreg_unknown_kr (void);
-extern void ia64_setreg_unknown_kr (void);
-
-#define ia64_get_kr(regnum) \
-({ \
- unsigned long r = 0; \
- \
- switch (regnum) { \
- case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \
- case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \
- case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \
- case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \
- case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \
- case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \
- case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \
- case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \
- default: ia64_getreg_unknown_kr(); break; \
- } \
- r; \
-})
-
-#define ia64_set_kr(regnum, r) \
-({ \
- switch (regnum) { \
- case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \
- case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \
- case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \
- case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \
- case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \
- case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \
- case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \
- case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \
- default: ia64_setreg_unknown_kr(); break; \
- } \
-})
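-
-/*
- * ia64_get_kr()/ia64_set_kr() switch on 'regnum' so that the
- * ia64_getreg()/ia64_setreg() intrinsics see a compile-time-constant
- * register encoding; for a constant 'regnum' the compiler folds each
- * macro down to a single kernel-register access.  Illustrative use:
- *
- *	unsigned long owner = ia64_get_kr(IA64_KR_FPU_OWNER);
- */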
-
-/*
- * The following three macros can't be inline functions because we don't have struct
- * task_struct at this point.
- */
-
-/*
- * Return TRUE if task T owns the fph partition of the CPU we're running on.
- * Must be called from code that has preemption disabled.
- */
-#ifndef XEN
-#define ia64_is_local_fpu_owner(t) \
-({ \
- struct task_struct *__ia64_islfo_task = (t); \
- (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
- && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
-})
-#endif
-
-/*
- * Mark task T as owning the fph partition of the CPU we're running on.
- * Must be called from code that has preemption disabled.
- */
-#define ia64_set_local_fpu_owner(t) do { \
- struct task_struct *__ia64_slfo_task = (t); \
- __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \
- ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \
-} while (0)
-
-/* Mark the fph partition of task T as being invalid on all CPUs. */
-#define ia64_drop_fpu(t) ((t)->thread.last_fph_cpu = -1)
-
-extern void __ia64_init_fpu (void);
-extern void __ia64_save_fpu (struct ia64_fpreg *fph);
-extern void __ia64_load_fpu (struct ia64_fpreg *fph);
-extern void ia64_save_debug_regs (unsigned long *save_area);
-extern void ia64_load_debug_regs (unsigned long *save_area);
-#ifdef XEN
-extern void dump_stack(void);
-#endif
-
-#ifdef CONFIG_IA32_SUPPORT
-extern void ia32_save_state (struct task_struct *task);
-extern void ia32_load_state (struct task_struct *task);
-#endif
-
-#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
-#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
-
-/* load fp 0.0 into fph */
-static inline void
-ia64_init_fpu (void) {
- ia64_fph_enable();
- __ia64_init_fpu();
- ia64_fph_disable();
-}
-
-/* save f32-f127 at FPH */
-static inline void
-ia64_save_fpu (struct ia64_fpreg *fph) {
- ia64_fph_enable();
- __ia64_save_fpu(fph);
- ia64_fph_disable();
-}
-
-/* load f32-f127 from FPH */
-static inline void
-ia64_load_fpu (struct ia64_fpreg *fph) {
- ia64_fph_enable();
- __ia64_load_fpu(fph);
- ia64_fph_disable();
-}
-
-static inline __u64
-ia64_clear_ic (void)
-{
- __u64 psr;
- psr = ia64_getreg(_IA64_REG_PSR);
- ia64_stop();
- ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
- ia64_srlz_i();
- return psr;
-}
-
-/*
- * Restore the psr.
- */
-static inline void
-ia64_set_psr (__u64 psr)
-{
- ia64_stop();
- ia64_setreg(_IA64_REG_PSR_L, psr);
- ia64_srlz_d();
-}
-
-/*
- * Insert a translation into an instruction and/or data translation
- * register.
- */
-static inline void
-ia64_itr (__u64 target_mask, __u64 tr_num,
- __u64 vmaddr, __u64 pte,
- __u64 log_page_size)
-{
- ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
- ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
- ia64_stop();
- if (target_mask & 0x1)
- ia64_itri(tr_num, pte);
- if (target_mask & 0x2)
- ia64_itrd(tr_num, pte);
-}
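-
-/*
- * Illustrative usage (a sketch of how a kernel mapping can be pinned,
- * assuming the IA64_TR_KERNEL and KERNEL_TR_PAGE_SHIFT definitions from
- * kregs.h/pgtable.h): insert a translation into both the instruction
- * (0x1) and data (0x2) translation registers:
- *
- *	ia64_itr(0x3, IA64_TR_KERNEL, vaddr, pte, KERNEL_TR_PAGE_SHIFT);
- */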
-
-/*
- * Insert a translation into the instruction and/or data translation
- * cache.
- */
-#ifdef XEN
-static inline void
-ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, __u64 itir)
-{
- ia64_setreg(_IA64_REG_CR_ITIR, itir);
- ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
- ia64_stop();
- /* as per EAS2.6, itc must be the last instruction in an instruction group */
- if (target_mask & 0x1)
- ia64_itci(pte);
- if (target_mask & 0x2)
- ia64_itcd(pte);
-}
-#else
-static inline void
-ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
- __u64 log_page_size)
-{
- ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
- ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
- ia64_stop();
- /* as per EAS2.6, itc must be the last instruction in an instruction group */
- if (target_mask & 0x1)
- ia64_itci(pte);
- if (target_mask & 0x2)
- ia64_itcd(pte);
-}
-#endif
-
-/*
- * Purge a range of addresses from instruction and/or data translation
- * register(s).
- */
-static inline void
-ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
-{
- if (target_mask & 0x1)
- ia64_ptri(vmaddr, (log_size << 2));
- if (target_mask & 0x2)
- ia64_ptrd(vmaddr, (log_size << 2));
-}
-
-/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
-static inline void
-ia64_set_iva (void *ivt_addr)
-{
- ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
- ia64_srlz_i();
-}
-
-/* Set the page table address and control bits. */
-static inline void
-ia64_set_pta (__u64 pta)
-{
- /* Note: srlz.i implies srlz.d */
- ia64_setreg(_IA64_REG_CR_PTA, pta);
- ia64_srlz_i();
-}
-
-static inline void
-ia64_eoi (void)
-{
- ia64_setreg(_IA64_REG_CR_EOI, 0);
- ia64_srlz_d();
-}
-
-#define cpu_relax() ia64_hint(ia64_hint_pause)
-
-static inline int
-ia64_get_irr(unsigned int vector)
-{
- unsigned int reg = vector / 64;
- unsigned int bit = vector % 64;
- u64 irr;
-
- switch (reg) {
- case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
- case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
- case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
- case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
- }
-
- return test_bit(bit, &irr);
-}
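-
-/*
- * Worked example (illustrative): for vector 0x41, reg == 0x41 / 64 == 1
- * and bit == 0x41 % 64 == 1, so ia64_get_irr(0x41) tests bit 1 of
- * cr.irr1.
- */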
-
-static inline void
-ia64_set_lrr0 (unsigned long val)
-{
- ia64_setreg(_IA64_REG_CR_LRR0, val);
- ia64_srlz_d();
-}
-
-static inline void
-ia64_set_lrr1 (unsigned long val)
-{
- ia64_setreg(_IA64_REG_CR_LRR1, val);
- ia64_srlz_d();
-}
-
-
-/*
- * Given the address to which a spill occurred, return the unat bit
- * number that corresponds to this address.
- */
-static inline __u64
-ia64_unat_pos (void *spill_addr)
-{
- return ((__u64) spill_addr >> 3) & 0x3f;
-}
-
-/*
- * Set the NaT bit of an integer register which was spilled at address
- * SPILL_ADDR. UNAT is the mask to be updated.
- */
-static inline void
-ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
-{
- __u64 bit = ia64_unat_pos(spill_addr);
- __u64 mask = 1UL << bit;
-
- *unat = (*unat & ~mask) | (nat << bit);
-}
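-
-/*
- * Worked example (illustrative): a register spilled to address
- * 0xe000000000000128 has ia64_unat_pos() == (0x128 >> 3) & 0x3f == 0x25,
- * so ia64_set_unat() updates bit 0x25 of the UNAT mask with the NaT bit.
- */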
-
-/*
- * Return saved PC of a blocked thread.
- * Note that the only way T can block is through a call to schedule() -> switch_to().
- */
-static inline unsigned long
-thread_saved_pc (struct task_struct *t)
-{
- struct unw_frame_info info;
- unsigned long ip;
-
- unw_init_from_blocked_task(&info, t);
- if (unw_unwind(&info) < 0)
- return 0;
- unw_get_ip(&info, &ip);
- return ip;
-}
-
-/*
- * Get the current instruction/program counter value.
- */
-#define current_text_addr() \
- ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
-
-static inline __u64
-ia64_get_ivr (void)
-{
- __u64 r;
- ia64_srlz_d();
- r = ia64_getreg(_IA64_REG_CR_IVR);
- ia64_srlz_d();
- return r;
-}
-
-#ifdef XEN
-/* Get the page table address and control bits. */
-static inline __u64
-ia64_get_pta (void)
-{
- __u64 r;
- ia64_srlz_d();
- r = ia64_getreg(_IA64_REG_CR_PTA);
- ia64_srlz_d();
- return r;
-}
-#endif
-
-static inline void
-ia64_set_dbr (__u64 regnum, __u64 value)
-{
- __ia64_set_dbr(regnum, value);
-#ifdef CONFIG_ITANIUM
- ia64_srlz_d();
-#endif
-}
-
-static inline __u64
-ia64_get_dbr (__u64 regnum)
-{
- __u64 retval;
-
- retval = __ia64_get_dbr(regnum);
-#ifdef CONFIG_ITANIUM
- ia64_srlz_d();
-#endif
- return retval;
-}
-
-static inline __u64
-ia64_rotr (__u64 w, __u64 n)
-{
- return (w >> n) | (w << (64 - n));
-}
-
-#define ia64_rotl(w,n) ia64_rotr((w), (64) - (n))
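-
-/* Illustrative: ia64_rotr(0x8000000000000001UL, 1) == 0xc000000000000000UL. */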
-
-/*
- * Take a mapped kernel address and return the equivalent address
- * in the region 7 identity mapped virtual area.
- */
-static inline void *
-ia64_imva (void *addr)
-{
- void *result;
- result = (void *) ia64_tpa(addr);
- return __va(result);
-}
-
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-#define PREFETCH_STRIDE L1_CACHE_BYTES
-
-static inline void
-prefetch (const void *x)
-{
- ia64_lfetch(ia64_lfhint_none, x);
-}
-
-static inline void
-prefetchw (const void *x)
-{
- ia64_lfetch_excl(ia64_lfhint_none, x);
-}
-
-#define spin_lock_prefetch(x) prefetchw(x)
-
-extern unsigned long boot_option_idle_override;
-
-#ifdef XEN
-static inline unsigned int
-ia64_get_cpl(unsigned long psr)
-{
- return (psr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
-}
-#endif
-
-#endif /* !__ASSEMBLY__ */
-
-#ifdef XEN
-#include <asm/ptrace.h>
-#endif
-
-#endif /* _ASM_IA64_PROCESSOR_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/ptrace.h b/xen/include/asm-ia64/linux-xen/asm/ptrace.h
deleted file mode 100644
index 6d43826c0d..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/ptrace.h
+++ /dev/null
@@ -1,386 +0,0 @@
-#ifndef _ASM_IA64_PTRACE_H
-#define _ASM_IA64_PTRACE_H
-
-/*
- * Copyright (C) 1998-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2003 Intel Co
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Fenghua Yu <fenghua.yu@intel.com>
- * Arun Sharma <arun.sharma@intel.com>
- *
- * 12/07/98 S. Eranian added pt_regs & switch_stack
- * 12/21/98 D. Mosberger updated to match latest code
- * 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
- *
- */
-/*
- * When a user process is blocked, its state looks as follows:
- *
- * +----------------------+ ------- IA64_STK_OFFSET
- * | | ^
- * | struct pt_regs | |
- * | | |
- * +----------------------+ |
- * | | |
- * | memory stack | |
- * | (growing downwards) | |
- * //.....................// |
- * |
- * //.....................// |
- * | | |
- * +----------------------+ |
- * | struct switch_stack | |
- * | | |
- * +----------------------+ |
- * | | |
- * //.....................// |
- * |
- * //.....................// |
- * | | |
- * | register stack | |
- * | (growing upwards) | |
- * | | |
- * +----------------------+ | --- IA64_RBS_OFFSET
- * | struct thread_info | | ^
- * +----------------------+ | |
- * | | | |
- * | struct task_struct | | |
- * current -> | | | |
- * +----------------------+ -------
- *
- * Note that ar.ec is not saved explicitly in pt_regs or switch_stack.
- * This is because ar.ec is saved as part of ar.pfs.
- */
-
-#include <linux/config.h>
-
-#include <asm/fpu.h>
-#include <asm/offsets.h>
-
-/*
- * Base-2 logarithm of number of pages to allocate per task structure
- * (including register backing store and memory stack):
- */
-#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
-# define KERNEL_STACK_SIZE_ORDER 3
-#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
-# define KERNEL_STACK_SIZE_ORDER 2
-#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
-# define KERNEL_STACK_SIZE_ORDER 1
-#else
-# define KERNEL_STACK_SIZE_ORDER 0
-#endif
-
-#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15)
-#define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
-
-#define KERNEL_STACK_SIZE IA64_STK_OFFSET
-
-#ifndef __ASSEMBLY__
-
-#include <asm/current.h>
-#include <asm/page.h>
-
-/*
- * This struct defines the way the registers are saved on system
- * calls.
- *
- * We don't save all floating point registers because the kernel
- * is compiled to use only a very small subset, so the others are
- * untouched.
- *
- * THIS STRUCTURE MUST BE A MULTIPLE OF 16 BYTES IN SIZE
- * (because the memory stack pointer MUST ALWAYS be aligned this way)
- *
- */
-#ifdef XEN
-#include <xen/types.h>
-#include <public/xen.h>
-
-#define pt_regs cpu_user_regs
-#endif
-
-struct pt_regs {
- /* The following registers are saved by SAVE_MIN: */
- unsigned long b6; /* scratch */
- unsigned long b7; /* scratch */
-
- unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
- unsigned long ar_ssd; /* reserved for future use (scratch) */
-
- unsigned long r8; /* scratch (return value register 0) */
- unsigned long r9; /* scratch (return value register 1) */
- unsigned long r10; /* scratch (return value register 2) */
- unsigned long r11; /* scratch (return value register 3) */
-
- unsigned long cr_ipsr; /* interrupted task's psr */
- unsigned long cr_iip; /* interrupted task's instruction pointer */
- /*
- * interrupted task's function state; if bit 63 is cleared, it
- * contains syscall's ar.pfs.pfm:
- */
- unsigned long cr_ifs;
-
- unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
- unsigned long ar_pfs; /* prev function state */
- unsigned long ar_rsc; /* RSE configuration */
- /* The following two are valid only if cr_ipsr.cpl > 0: */
- unsigned long ar_rnat; /* RSE NaT */
- unsigned long ar_bspstore; /* RSE bspstore */
-
- unsigned long pr; /* 64 predicate registers (1 bit each) */
- unsigned long b0; /* return pointer (bp) */
- unsigned long loadrs; /* size of dirty partition << 16 */
-
- unsigned long r1; /* the gp pointer */
- unsigned long r12; /* interrupted task's memory stack pointer */
- unsigned long r13; /* thread pointer */
-
- unsigned long ar_fpsr; /* floating point status (preserved) */
- unsigned long r15; /* scratch */
-
- /* The remaining registers are NOT saved for system calls. */
-
- unsigned long r14; /* scratch */
- unsigned long r2; /* scratch */
- unsigned long r3; /* scratch */
-
- /* The following registers are saved by SAVE_REST: */
- unsigned long r16; /* scratch */
- unsigned long r17; /* scratch */
- unsigned long r18; /* scratch */
- unsigned long r19; /* scratch */
- unsigned long r20; /* scratch */
- unsigned long r21; /* scratch */
- unsigned long r22; /* scratch */
- unsigned long r23; /* scratch */
- unsigned long r24; /* scratch */
- unsigned long r25; /* scratch */
- unsigned long r26; /* scratch */
- unsigned long r27; /* scratch */
- unsigned long r28; /* scratch */
- unsigned long r29; /* scratch */
- unsigned long r30; /* scratch */
- unsigned long r31; /* scratch */
-
- unsigned long ar_ccv; /* compare/exchange value (scratch) */
-
- /*
- * Floating point registers that the kernel considers scratch:
- */
- struct ia64_fpreg f6; /* scratch */
- struct ia64_fpreg f7; /* scratch */
- struct ia64_fpreg f8; /* scratch */
- struct ia64_fpreg f9; /* scratch */
- struct ia64_fpreg f10; /* scratch */
- struct ia64_fpreg f11; /* scratch */
-#ifdef XEN
- unsigned long r4; /* preserved */
- unsigned long r5; /* preserved */
- unsigned long r6; /* preserved */
- unsigned long r7; /* preserved */
- unsigned long eml_unat; /* used for emulating instruction */
- unsigned long pad0; /* alignment pad */
-#endif
-};
-
-#ifdef XEN
-/*
- * User regs are placed at the end of the vcpu area.
- * Convert a vcpu pointer to a regs pointer.
- * Note: this is the same as ia64_task_regs, but it uses a Xen-friendly name.
- */
-struct vcpu;
-static inline struct cpu_user_regs *vcpu_regs(struct vcpu *v)
-{
- return (struct cpu_user_regs *)((unsigned long)v + IA64_STK_OFFSET) - 1;
-}
-
-#define return_reg(v) (vcpu_regs(v)->r8)
-
-struct cpu_user_regs *guest_cpu_user_regs(void);
-
-extern void show_stack(struct task_struct *task, unsigned long *sp);
-#endif
-
-/*
- * This structure contains the additional registers that need to be
- * preserved across a context switch. This generally consists of
- * "preserved" registers.
- */
-struct switch_stack {
- unsigned long caller_unat; /* user NaT collection register (preserved) */
- unsigned long ar_fpsr; /* floating-point status register */
-
- struct ia64_fpreg f2; /* preserved */
- struct ia64_fpreg f3; /* preserved */
- struct ia64_fpreg f4; /* preserved */
- struct ia64_fpreg f5; /* preserved */
-
- struct ia64_fpreg f12; /* scratch, but untouched by kernel */
- struct ia64_fpreg f13; /* scratch, but untouched by kernel */
- struct ia64_fpreg f14; /* scratch, but untouched by kernel */
- struct ia64_fpreg f15; /* scratch, but untouched by kernel */
- struct ia64_fpreg f16; /* preserved */
- struct ia64_fpreg f17; /* preserved */
- struct ia64_fpreg f18; /* preserved */
- struct ia64_fpreg f19; /* preserved */
- struct ia64_fpreg f20; /* preserved */
- struct ia64_fpreg f21; /* preserved */
- struct ia64_fpreg f22; /* preserved */
- struct ia64_fpreg f23; /* preserved */
- struct ia64_fpreg f24; /* preserved */
- struct ia64_fpreg f25; /* preserved */
- struct ia64_fpreg f26; /* preserved */
- struct ia64_fpreg f27; /* preserved */
- struct ia64_fpreg f28; /* preserved */
- struct ia64_fpreg f29; /* preserved */
- struct ia64_fpreg f30; /* preserved */
- struct ia64_fpreg f31; /* preserved */
-
- unsigned long r4; /* preserved */
- unsigned long r5; /* preserved */
- unsigned long r6; /* preserved */
- unsigned long r7; /* preserved */
-
- unsigned long b0; /* so we can force a direct return in copy_thread */
- unsigned long b1;
- unsigned long b2;
- unsigned long b3;
- unsigned long b4;
- unsigned long b5;
-
- unsigned long ar_pfs; /* previous function state */
- unsigned long ar_lc; /* loop counter (preserved) */
- unsigned long ar_unat; /* NaT bits for r4-r7 */
- unsigned long ar_rnat; /* RSE NaT collection register */
- unsigned long ar_bspstore; /* RSE dirty base (preserved) */
- unsigned long pr; /* 64 predicate registers (1 bit each) */
-};
-
-#ifdef __KERNEL__
-/*
- * We use ia64_psr(regs)->ri to determine which of the three
- * instructions in the bundle (16 bytes) took the sample. Generate
- * the canonical representation by adding it to the instruction pointer.
- */
-# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
-/* Conserve space in histogram by encoding slot bits in address
- * bits 2 and 3 rather than bits 0 and 1.
- */
-#define profile_pc(regs) \
-({ \
- unsigned long __ip = instruction_pointer(regs); \
- (__ip & ~3UL) + ((__ip & 3UL) << 2); \
-})
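-
-/*
- * Worked example (illustrative): for a sample taken in slot 2 of the
- * bundle at 0x4000, instruction_pointer() yields 0x4002, and
- * profile_pc() re-encodes it as (0x4002 & ~3UL) + ((0x4002 & 3UL) << 2)
- * == 0x4008, placing the slot number in address bits 2-3.
- */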
-
- /* given a pointer to a task_struct, return the user's pt_regs */
-# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
-# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
-#ifdef XEN
-# define guest_mode(regs) (ia64_psr(regs)->cpl && !ia64_psr(regs)->vm)
-# define guest_kernel_mode(regs) (ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL)
-# define vmx_guest_kernel_mode(regs) (ia64_psr(regs)->cpl == 0)
-# define regs_increment_iip(regs) \
-do { \
- struct ia64_psr *ipsr = ia64_psr(regs); \
- if (ipsr->ri == 2) { \
- ipsr->ri = 0; \
- regs->cr_iip += 16; \
- } else \
- ipsr->ri++; \
-} while (0)
-#else
-# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
-#endif
-# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
-# define fsys_mode(task,regs) \
- ({ \
- struct task_struct *_task = (task); \
- struct pt_regs *_regs = (regs); \
- !user_mode(_regs) && user_stack(_task, _regs); \
- })
-
- /*
- * System call handlers that, upon successful completion, need to return a negative value
- * should call force_successful_syscall_return() right before returning. On architectures
- * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
- * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
- * flag will not get set. On architectures which do not support a separate error flag,
- * the macro is a no-op and the spurious error condition needs to be filtered out by some
- * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
- * or something along those lines).
- *
- * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
- */
-# define force_successful_syscall_return() (ia64_task_regs(current)->r8 = 0)
-
- struct task_struct; /* forward decl */
- struct unw_frame_info; /* forward decl */
-
- extern void show_regs (struct pt_regs *);
- extern void ia64_do_show_stack (struct unw_frame_info *, void *);
- extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
- unsigned long *);
- extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
- unsigned long, long *);
- extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
- unsigned long, long);
- extern void ia64_flush_fph (struct task_struct *);
- extern void ia64_sync_fph (struct task_struct *);
- extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
- unsigned long, unsigned long);
-
- /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
- extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
- /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
- extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
-
- extern void ia64_increment_ip (struct pt_regs *pt);
- extern void ia64_decrement_ip (struct pt_regs *pt);
-
-#endif /* __KERNEL__ */
-
-/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
-struct pt_all_user_regs {
- unsigned long nat;
- unsigned long cr_iip;
- unsigned long cfm;
- unsigned long cr_ipsr;
- unsigned long pr;
-
- unsigned long gr[32];
- unsigned long br[8];
- unsigned long ar[128];
- struct ia64_fpreg fr[128];
-};
-
-#endif /* !__ASSEMBLY__ */
-
-/* indices to application-registers array in pt_all_user_regs */
-#define PT_AUR_RSC 16
-#define PT_AUR_BSP 17
-#define PT_AUR_BSPSTORE 18
-#define PT_AUR_RNAT 19
-#define PT_AUR_CCV 32
-#define PT_AUR_UNAT 36
-#define PT_AUR_FPSR 40
-#define PT_AUR_PFS 64
-#define PT_AUR_LC 65
-#define PT_AUR_EC 66
-
-/*
- * The numbers chosen here are somewhat arbitrary but absolutely MUST
- * not overlap with any of the numbers assigned in <linux/ptrace.h>.
- */
-#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
-#define PTRACE_OLD_GETSIGINFO 13 /* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>) */
-#define PTRACE_OLD_SETSIGINFO 14 /* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>) */
-#define PTRACE_GETREGS 18 /* get all registers (pt_all_user_regs) in one shot */
-#define PTRACE_SETREGS 19 /* set all registers (pt_all_user_regs) in one shot */
-
-#define PTRACE_OLDSETOPTIONS 21
-
-#endif /* _ASM_IA64_PTRACE_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sal.h b/xen/include/asm-ia64/linux-xen/asm/sal.h
deleted file mode 100644
index e52ac0dbf5..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sal.h
+++ /dev/null
@@ -1,891 +0,0 @@
-#ifndef _ASM_IA64_SAL_H
-#define _ASM_IA64_SAL_H
-
-/*
- * System Abstraction Layer definitions.
- *
- * This is based on version 2.5 of the manual "IA-64 System
- * Abstraction Layer".
- *
- * Copyright (C) 2001 Intel
- * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
- * Copyright (C) 2001 Fred Lewis <frederick.v.lewis@intel.com>
- * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
- *
- * 02/01/04 J. Hall Updated Error Record Structures to conform to July 2001
- * revision of the SAL spec.
- * 01/01/03 fvlewis Updated Error Record Structures to conform with Nov. 2000
- * revision of the SAL spec.
- * 99/09/29 davidm Updated for SAL 2.6.
- * 00/03/29 cfleck Updated SAL Error Logging info for processor (SAL 2.6)
- * (plus examples of platform error info structures from smariset @ Intel)
- */
-
-#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT 0
-#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT 1
-#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT 2
-#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT 3
-
-#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK (1<<IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT)
-#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT)
-#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT)
-#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT (1<<IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT)
-
-#ifndef __ASSEMBLY__
-
-#include <linux/bcd.h>
-#include <linux/spinlock.h>
-#include <linux/efi.h>
-
-#include <asm/pal.h>
-#include <asm/system.h>
-#include <asm/fpu.h>
-
-extern spinlock_t sal_lock;
-
-/* SAL spec _requires_ eight args for each call. */
-#define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7) \
- result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)
-
-# define SAL_CALL(result,args...) do { \
- unsigned long __ia64_sc_flags; \
- struct ia64_fpreg __ia64_sc_fr[6]; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- ia64_save_scratch_fpregs(__ia64_sc_fr); \
- spin_lock_irqsave(&sal_lock, __ia64_sc_flags); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- __SAL_CALL(result, args); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags); \
- ia64_load_scratch_fpregs(__ia64_sc_fr); \
-} while (0)
-
-# define SAL_CALL_NOLOCK(result,args...) do { \
- unsigned long __ia64_scn_flags; \
- struct ia64_fpreg __ia64_scn_fr[6]; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- ia64_save_scratch_fpregs(__ia64_scn_fr); \
- local_irq_save(__ia64_scn_flags); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- __SAL_CALL(result, args); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- local_irq_restore(__ia64_scn_flags); \
- ia64_load_scratch_fpregs(__ia64_scn_fr); \
-} while (0)
-
-# define SAL_CALL_REENTRANT(result,args...) do { \
- struct ia64_fpreg __ia64_scs_fr[6]; \
- XEN_EFI_RR_DECLARE(rr6, rr7); \
- ia64_save_scratch_fpregs(__ia64_scs_fr); \
- preempt_disable(); \
- XEN_EFI_RR_ENTER(rr6, rr7); \
- __SAL_CALL(result, args); \
- XEN_EFI_RR_LEAVE(rr6, rr7); \
- preempt_enable(); \
- ia64_load_scratch_fpregs(__ia64_scs_fr); \
-} while (0)
-
-#define SAL_SET_VECTORS 0x01000000
-#define SAL_GET_STATE_INFO 0x01000001
-#define SAL_GET_STATE_INFO_SIZE 0x01000002
-#define SAL_CLEAR_STATE_INFO 0x01000003
-#define SAL_MC_RENDEZ 0x01000004
-#define SAL_MC_SET_PARAMS 0x01000005
-#define SAL_REGISTER_PHYSICAL_ADDR 0x01000006
-
-#define SAL_CACHE_FLUSH 0x01000008
-#define SAL_CACHE_INIT 0x01000009
-#define SAL_PCI_CONFIG_READ 0x01000010
-#define SAL_PCI_CONFIG_WRITE 0x01000011
-#define SAL_FREQ_BASE 0x01000012
-#define SAL_PHYSICAL_ID_INFO 0x01000013
-
-#define SAL_UPDATE_PAL 0x01000020
-
-struct ia64_sal_retval {
- /*
- * A zero status value indicates call completed without error.
- * A negative status value indicates reason of call failure.
- * A positive status value indicates success but an
- * informational value should be printed (e.g., "reboot for
- * change to take effect").
- */
- s64 status;
- u64 v0;
- u64 v1;
- u64 v2;
-};
-
-typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
-
-enum {
- SAL_FREQ_BASE_PLATFORM = 0,
- SAL_FREQ_BASE_INTERVAL_TIMER = 1,
- SAL_FREQ_BASE_REALTIME_CLOCK = 2
-};
-
-/*
- * The SAL system table is followed by a variable number of variable
- * length descriptors. The structure of these descriptors follows
- * below.
- * The definition follows SAL specs from July 2000
- */
-struct ia64_sal_systab {
- u8 signature[4]; /* should be "SST_" */
- u32 size; /* size of this table in bytes */
- u8 sal_rev_minor;
- u8 sal_rev_major;
- u16 entry_count; /* # of entries in variable portion */
- u8 checksum;
- u8 reserved1[7];
- u8 sal_a_rev_minor;
- u8 sal_a_rev_major;
- u8 sal_b_rev_minor;
- u8 sal_b_rev_major;
- /* oem_id & product_id: terminating NUL is missing if string is exactly 32 bytes long. */
- u8 oem_id[32];
- u8 product_id[32]; /* ASCII product id */
- u8 reserved2[8];
-};
-
-enum sal_systab_entry_type {
- SAL_DESC_ENTRY_POINT = 0,
- SAL_DESC_MEMORY = 1,
- SAL_DESC_PLATFORM_FEATURE = 2,
- SAL_DESC_TR = 3,
- SAL_DESC_PTC = 4,
- SAL_DESC_AP_WAKEUP = 5
-};
-
-/*
- * Entry type: Size:
- * 0 48
- * 1 32
- * 2 16
- * 3 32
- * 4 16
- * 5 16
- */
-#define SAL_DESC_SIZE(type) "\060\040\020\040\020\020"[(unsigned) type]
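-
-/*
- * The string literal above encodes the size table as octal escapes
- * ('\060' == 48, '\040' == 32, '\020' == 16).  Worked example:
- * SAL_DESC_SIZE(SAL_DESC_MEMORY) indexes character 1, '\040', i.e. a
- * 32-byte memory descriptor.
- */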
-
-typedef struct ia64_sal_desc_entry_point {
- u8 type;
- u8 reserved1[7];
- u64 pal_proc;
- u64 sal_proc;
- u64 gp;
- u8 reserved2[16];
-} ia64_sal_desc_entry_point_t;
-
-typedef struct ia64_sal_desc_memory {
- u8 type;
- u8 used_by_sal; /* needs to be mapped for SAL? */
- u8 mem_attr; /* current memory attribute setting */
- u8 access_rights; /* access rights set up by SAL */
- u8 mem_attr_mask; /* mask of supported memory attributes */
- u8 reserved1;
- u8 mem_type; /* memory type */
- u8 mem_usage; /* memory usage */
- u64 addr; /* physical address of memory */
- u32 length; /* length (multiple of 4KB pages) */
- u32 reserved2;
- u8 oem_reserved[8];
-} ia64_sal_desc_memory_t;
-
-typedef struct ia64_sal_desc_platform_feature {
- u8 type;
- u8 feature_mask;
- u8 reserved1[14];
-} ia64_sal_desc_platform_feature_t;
-
-typedef struct ia64_sal_desc_tr {
- u8 type;
- u8 tr_type; /* 0 == instruction, 1 == data */
- u8 regnum; /* translation register number */
- u8 reserved1[5];
- u64 addr; /* virtual address of area covered */
- u64 page_size; /* encoded page size */
- u8 reserved2[8];
-} ia64_sal_desc_tr_t;
-
-typedef struct ia64_sal_desc_ptc {
- u8 type;
- u8 reserved1[3];
- u32 num_domains; /* # of coherence domains */
- u64 domain_info; /* physical address of domain info table */
-} ia64_sal_desc_ptc_t;
-
-typedef struct ia64_sal_ptc_domain_info {
- u64 proc_count; /* number of processors in domain */
- u64 proc_list; /* physical address of LID array */
-} ia64_sal_ptc_domain_info_t;
-
-typedef struct ia64_sal_ptc_domain_proc_entry {
- u64 id : 8; /* id of processor */
- u64 eid : 8; /* eid of processor */
-} ia64_sal_ptc_domain_proc_entry_t;
-
-
-#define IA64_SAL_AP_EXTERNAL_INT 0
-
-typedef struct ia64_sal_desc_ap_wakeup {
- u8 type;
- u8 mechanism; /* 0 == external interrupt */
- u8 reserved1[6];
- u64 vector; /* interrupt vector in range 0x10-0xff */
-} ia64_sal_desc_ap_wakeup_t;
-
-extern ia64_sal_handler ia64_sal;
-extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
-
-extern unsigned short sal_revision; /* supported SAL spec revision */
-extern unsigned short sal_version; /* SAL version; OEM dependent */
-#define SAL_VERSION_CODE(major, minor) ((BIN2BCD(major) << 8) | BIN2BCD(minor))
-
-extern const char *ia64_sal_strerror (long status);
-extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
-
-/* SAL information type encodings */
-enum {
- SAL_INFO_TYPE_MCA = 0, /* Machine check abort information */
- SAL_INFO_TYPE_INIT = 1, /* Init information */
- SAL_INFO_TYPE_CMC = 2, /* Corrected machine check information */
- SAL_INFO_TYPE_CPE = 3 /* Corrected platform error information */
-};
-
-/* Encodings for machine check parameter types */
-enum {
- SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezvous interrupt */
- SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */
- SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */
-};
-
-/* Encodings for rendezvous mechanisms */
-enum {
- SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */
- SAL_MC_PARAM_MECHANISM_MEM = 2 /* Use memory synchronization variable*/
-};
-
-/* Encodings for vectors which can be registered by the OS with SAL */
-enum {
- SAL_VECTOR_OS_MCA = 0,
- SAL_VECTOR_OS_INIT = 1,
- SAL_VECTOR_OS_BOOT_RENDEZ = 2
-};
-
-/* Encodings for mca_opt parameter sent to SAL_MC_SET_PARAMS */
-#define SAL_MC_PARAM_RZ_ALWAYS 0x1
-#define SAL_MC_PARAM_BINIT_ESCALATE 0x10
-
-/*
- * Definition of the SAL Error Log from the SAL spec
- */
-
-/* SAL Error Record Section GUID Definitions */
-#define SAL_PROC_DEV_ERR_SECT_GUID \
- EFI_GUID(0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-#define SAL_PLAT_MEM_DEV_ERR_SECT_GUID \
- EFI_GUID(0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-#define SAL_PLAT_SEL_DEV_ERR_SECT_GUID \
- EFI_GUID(0xe429faf3, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-#define SAL_PLAT_PCI_BUS_ERR_SECT_GUID \
- EFI_GUID(0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-#define SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID \
- EFI_GUID(0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-#define SAL_PLAT_PCI_COMP_ERR_SECT_GUID \
- EFI_GUID(0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-#define SAL_PLAT_SPECIFIC_ERR_SECT_GUID \
- EFI_GUID(0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-#define SAL_PLAT_HOST_CTLR_ERR_SECT_GUID \
- EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-#define SAL_PLAT_BUS_ERR_SECT_GUID \
- EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-
-#define MAX_CACHE_ERRORS 6
-#define MAX_TLB_ERRORS 6
-#define MAX_BUS_ERRORS 1
-
-/* Definition of version according to SAL spec for logging purposes */
-typedef struct sal_log_revision {
- u8 minor; /* BCD (0..99) */
- u8 major; /* BCD (0..99) */
-} sal_log_revision_t;
-
-/* Definition of timestamp according to SAL spec for logging purposes */
-typedef struct sal_log_timestamp {
- u8 slh_second; /* Second (0..59) */
- u8 slh_minute; /* Minute (0..59) */
- u8 slh_hour; /* Hour (0..23) */
- u8 slh_reserved;
- u8 slh_day; /* Day (1..31) */
- u8 slh_month; /* Month (1..12) */
- u8 slh_year; /* Year (00..99) */
- u8 slh_century; /* Century (19, 20, 21, ...) */
-} sal_log_timestamp_t;
-
-/* Definition of log record header structures */
-typedef struct sal_log_record_header {
- u64 id; /* Unique monotonically increasing ID */
- sal_log_revision_t revision; /* Major and Minor revision of header */
- u16 severity; /* Error Severity */
- u32 len; /* Length of this error log in bytes */
- sal_log_timestamp_t timestamp; /* Timestamp */
- efi_guid_t platform_guid; /* Unique OEM Platform ID */
-} sal_log_record_header_t;
-
-#define sal_log_severity_recoverable 0
-#define sal_log_severity_fatal 1
-#define sal_log_severity_corrected 2
-
-/* Definition of log section header structures */
-typedef struct sal_log_sec_header {
- efi_guid_t guid; /* Unique Section ID */
- sal_log_revision_t revision; /* Major and Minor revision of Section */
- u16 reserved;
- u32 len; /* Section length */
-} sal_log_section_hdr_t;
-
-typedef struct sal_log_mod_error_info {
- struct {
- u64 check_info : 1,
- requestor_identifier : 1,
- responder_identifier : 1,
- target_identifier : 1,
- precise_ip : 1,
- reserved : 59;
- } valid;
- u64 check_info;
- u64 requestor_identifier;
- u64 responder_identifier;
- u64 target_identifier;
- u64 precise_ip;
-} sal_log_mod_error_info_t;
-
-typedef struct sal_processor_static_info {
- struct {
- u64 minstate : 1,
- br : 1,
- cr : 1,
- ar : 1,
- rr : 1,
- fr : 1,
- reserved : 58;
- } valid;
- pal_min_state_area_t min_state_area;
- u64 br[8];
- u64 cr[128];
- u64 ar[128];
- u64 rr[8];
- struct ia64_fpreg __attribute__ ((packed)) fr[128];
-} sal_processor_static_info_t;
-
-struct sal_cpuid_info {
- u64 regs[5];
- u64 reserved;
-};
-
-typedef struct sal_log_processor_info {
- sal_log_section_hdr_t header;
- struct {
- u64 proc_error_map : 1,
- proc_state_param : 1,
- proc_cr_lid : 1,
- psi_static_struct : 1,
- num_cache_check : 4,
- num_tlb_check : 4,
- num_bus_check : 4,
- num_reg_file_check : 4,
- num_ms_check : 4,
- cpuid_info : 1,
- reserved1 : 39;
- } valid;
- u64 proc_error_map;
- u64 proc_state_parameter;
- u64 proc_cr_lid;
- /*
- * The rest of this structure consists of variable-length arrays, which can't be
- * expressed in C.
- */
- sal_log_mod_error_info_t info[0];
- /*
- * This is what the rest looked like if C supported variable-length arrays:
- *
- * sal_log_mod_error_info_t cache_check_info[.valid.num_cache_check];
- * sal_log_mod_error_info_t tlb_check_info[.valid.num_tlb_check];
- * sal_log_mod_error_info_t bus_check_info[.valid.num_bus_check];
- * sal_log_mod_error_info_t reg_file_check_info[.valid.num_reg_file_check];
- * sal_log_mod_error_info_t ms_check_info[.valid.num_ms_check];
- * struct sal_cpuid_info cpuid_info;
- * sal_processor_static_info_t processor_static_info;
- */
-} sal_log_processor_info_t;
-
-/* Given a sal_log_processor_info_t pointer, return a pointer to the processor_static_info: */
-#define SAL_LPI_PSI_INFO(l) \
-({ sal_log_processor_info_t *_l = (l); \
- ((sal_processor_static_info_t *) \
- ((char *) _l->info + ((_l->valid.num_cache_check + _l->valid.num_tlb_check \
- + _l->valid.num_bus_check + _l->valid.num_reg_file_check \
- + _l->valid.num_ms_check) * sizeof(sal_log_mod_error_info_t) \
- + sizeof(struct sal_cpuid_info)))); \
-})
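
/*
 * Editorial sketch (not from the original header): one way a consumer
 * could walk the variable-length sections described above.  The
 * function name and the printk formatting are illustrative only.
 */
static void dump_cache_checks(sal_log_processor_info_t *lpi)
{
	unsigned int i;

	/* cache-check entries are the first group in info[] */
	for (i = 0; i < lpi->valid.num_cache_check; i++) {
		sal_log_mod_error_info_t *mod = &lpi->info[i];

		if (mod->valid.check_info)
			printk("cache check %u: check_info=%lx\n",
			       i, mod->check_info);
	}

	/* static processor state sits past all mod_error sections */
	if (lpi->valid.psi_static_struct) {
		sal_processor_static_info_t *psi = SAL_LPI_PSI_INFO(lpi);

		(void)psi;	/* decode min-state, BRs, CRs, ... here */
	}
}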
-
-/* platform error log structures */
-
-typedef struct sal_log_mem_dev_err_info {
- sal_log_section_hdr_t header;
- struct {
- u64 error_status : 1,
- physical_addr : 1,
- addr_mask : 1,
- node : 1,
- card : 1,
- module : 1,
- bank : 1,
- device : 1,
- row : 1,
- column : 1,
- bit_position : 1,
- requestor_id : 1,
- responder_id : 1,
- target_id : 1,
- bus_spec_data : 1,
- oem_id : 1,
- oem_data : 1,
- reserved : 47;
- } valid;
- u64 error_status;
- u64 physical_addr;
- u64 addr_mask;
- u16 node;
- u16 card;
- u16 module;
- u16 bank;
- u16 device;
- u16 row;
- u16 column;
- u16 bit_position;
- u64 requestor_id;
- u64 responder_id;
- u64 target_id;
- u64 bus_spec_data;
- u8 oem_id[16];
- u8 oem_data[1]; /* Variable length data */
-} sal_log_mem_dev_err_info_t;
-
-typedef struct sal_log_sel_dev_err_info {
- sal_log_section_hdr_t header;
- struct {
- u64 record_id : 1,
- record_type : 1,
- generator_id : 1,
- evm_rev : 1,
- sensor_type : 1,
- sensor_num : 1,
- event_dir : 1,
- event_data1 : 1,
- event_data2 : 1,
- event_data3 : 1,
- reserved : 54;
- } valid;
- u16 record_id;
- u8 record_type;
- u8 timestamp[4];
- u16 generator_id;
- u8 evm_rev;
- u8 sensor_type;
- u8 sensor_num;
- u8 event_dir;
- u8 event_data1;
- u8 event_data2;
- u8 event_data3;
-} sal_log_sel_dev_err_info_t;
-
-typedef struct sal_log_pci_bus_err_info {
- sal_log_section_hdr_t header;
- struct {
- u64 err_status : 1,
- err_type : 1,
- bus_id : 1,
- bus_address : 1,
- bus_data : 1,
- bus_cmd : 1,
- requestor_id : 1,
- responder_id : 1,
- target_id : 1,
- oem_data : 1,
- reserved : 54;
- } valid;
- u64 err_status;
- u16 err_type;
- u16 bus_id;
- u32 reserved;
- u64 bus_address;
- u64 bus_data;
- u64 bus_cmd;
- u64 requestor_id;
- u64 responder_id;
- u64 target_id;
- u8 oem_data[1]; /* Variable length data */
-} sal_log_pci_bus_err_info_t;
-
-typedef struct sal_log_smbios_dev_err_info {
- sal_log_section_hdr_t header;
- struct {
- u64 event_type : 1,
- length : 1,
- time_stamp : 1,
- data : 1,
- reserved1 : 60;
- } valid;
- u8 event_type;
- u8 length;
- u8 time_stamp[6];
- u8 data[1]; /* data of variable length, length == slsmb_length */
-} sal_log_smbios_dev_err_info_t;
-
-typedef struct sal_log_pci_comp_err_info {
- sal_log_section_hdr_t header;
- struct {
- u64 err_status : 1,
- comp_info : 1,
- num_mem_regs : 1,
- num_io_regs : 1,
- reg_data_pairs : 1,
- oem_data : 1,
- reserved : 58;
- } valid;
- u64 err_status;
- struct {
- u16 vendor_id;
- u16 device_id;
- u8 class_code[3];
- u8 func_num;
- u8 dev_num;
- u8 bus_num;
- u8 seg_num;
- u8 reserved[5];
- } comp_info;
- u32 num_mem_regs;
- u32 num_io_regs;
- u64 reg_data_pairs[1];
- /*
- * array of address/data register pairs is num_mem_regs + num_io_regs elements
- * long. Each array element consists of a u64 address followed by a u64 data
- * value. The oem_data array immediately follows the reg_data_pairs array
- */
- u8 oem_data[1]; /* Variable length data */
-} sal_log_pci_comp_err_info_t;
-
-typedef struct sal_log_plat_specific_err_info {
- sal_log_section_hdr_t header;
- struct {
- u64 err_status : 1,
- guid : 1,
- oem_data : 1,
- reserved : 61;
- } valid;
- u64 err_status;
- efi_guid_t guid;
- u8 oem_data[1]; /* platform specific variable length data */
-} sal_log_plat_specific_err_info_t;
-
-typedef struct sal_log_host_ctlr_err_info {
- sal_log_section_hdr_t header;
- struct {
- u64 err_status : 1,
- requestor_id : 1,
- responder_id : 1,
- target_id : 1,
- bus_spec_data : 1,
- oem_data : 1,
- reserved : 58;
- } valid;
- u64 err_status;
- u64 requestor_id;
- u64 responder_id;
- u64 target_id;
- u64 bus_spec_data;
- u8 oem_data[1]; /* Variable length OEM data */
-} sal_log_host_ctlr_err_info_t;
-
-typedef struct sal_log_plat_bus_err_info {
- sal_log_section_hdr_t header;
- struct {
- u64 err_status : 1,
- requestor_id : 1,
- responder_id : 1,
- target_id : 1,
- bus_spec_data : 1,
- oem_data : 1,
- reserved : 58;
- } valid;
- u64 err_status;
- u64 requestor_id;
- u64 responder_id;
- u64 target_id;
- u64 bus_spec_data;
- u8 oem_data[1]; /* Variable length OEM data */
-} sal_log_plat_bus_err_info_t;
-
-/* Overall platform error section structure */
-typedef union sal_log_platform_err_info {
- sal_log_mem_dev_err_info_t mem_dev_err;
- sal_log_sel_dev_err_info_t sel_dev_err;
- sal_log_pci_bus_err_info_t pci_bus_err;
- sal_log_smbios_dev_err_info_t smbios_dev_err;
- sal_log_pci_comp_err_info_t pci_comp_err;
- sal_log_plat_specific_err_info_t plat_specific_err;
- sal_log_host_ctlr_err_info_t host_ctlr_err;
- sal_log_plat_bus_err_info_t plat_bus_err;
-} sal_log_platform_err_info_t;
-
-/* SAL log over-all, multi-section error record structure (processor+platform) */
-typedef struct err_rec {
- sal_log_record_header_t sal_elog_header;
- sal_log_processor_info_t proc_err;
- sal_log_platform_err_info_t plat_err;
- u8 oem_data_pad[1024];
-} ia64_err_rec_t;
-
-/*
- * Now define a couple of inline functions for improved type checking
- * and convenience.
- */
-static inline long
-ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
- unsigned long *drift_info)
-{
- struct ia64_sal_retval isrv;
-
- SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
- *ticks_per_second = isrv.v0;
- *drift_info = isrv.v1;
- return isrv.status;
-}
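
/*
 * Editorial sketch: typical use of ia64_sal_freq_base().  It assumes
 * the SAL_FREQ_BASE_PLATFORM selector (the platform base clock) is
 * defined earlier in this header; the error handling is illustrative.
 */
static inline unsigned long platform_base_freq(void)
{
	unsigned long ticks_per_sec, drift;

	if (ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
			       &ticks_per_sec, &drift) != 0)
		return 0;	/* SAL call failed */

	return ticks_per_sec;
}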
-
-extern s64 ia64_sal_cache_flush (u64 cache_type);
-
-/* Initialize all the processor and platform level instruction and data caches */
-static inline s64
-ia64_sal_cache_init (void)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SAL_CACHE_INIT, 0, 0, 0, 0, 0, 0, 0);
- return isrv.status;
-}
-
-/*
- * Clear the processor and platform information logged by SAL with respect to the machine
- * state at the time of MCA's, INITs, CMCs, or CPEs.
- */
-static inline s64
-ia64_sal_clear_state_info (u64 sal_info_type)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL_REENTRANT(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0,
- 0, 0, 0, 0, 0);
- return isrv.status;
-}
-
-/*
- * Get the processor and platform information logged by SAL with respect to the
- * machine state at the time of the MCAs, INITs, CMCs, or CPEs.
- */
-static inline u64
-ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
- sal_info, 0, 0, 0, 0);
- if (isrv.status)
- return 0;
-
- return isrv.v0;
-}
-
-/*
- * Get the maximum size of the information logged by SAL with respect to the machine state
- * at the time of MCAs, INITs, CMCs, or CPEs.
- */
-static inline u64
-ia64_sal_get_state_info_size (u64 sal_info_type)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0,
- 0, 0, 0, 0, 0);
- if (isrv.status)
- return 0;
- return isrv.v0;
-}
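
/*
 * Editorial sketch of the usual retrieval flow built from the three
 * calls above: size the buffer, fetch the record, then clear it so
 * SAL can log the next event.  xmalloc_bytes() is assumed to be the
 * usual Xen allocator; any sufficiently large buffer would do.
 */
static u64 fetch_mca_record(u64 **buf_out)
{
	u64 size = ia64_sal_get_state_info_size(SAL_INFO_TYPE_MCA);
	u64 *buf = size ? xmalloc_bytes(size) : NULL;
	u64 valid_bytes = 0;

	if (buf) {
		valid_bytes = ia64_sal_get_state_info(SAL_INFO_TYPE_MCA, buf);
		if (valid_bytes)
			ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
	}
	*buf_out = buf;
	return valid_bytes;	/* 0 on failure */
}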
-
-/*
- * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
- * the monarch processor. Must not lock, because it will not return on any cpu until the
- * monarch processor sends a wake up.
- */
-static inline s64
-ia64_sal_mc_rendez (void)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
- return isrv.status;
-}
-
-/*
- * Allow the OS to specify the interrupt number to be used by SAL to
- * interrupt the OS during the machine check rendezvous sequence, as well
- * as the mechanism to wake up the non-monarch processor at the end of
- * machine check processing.
- * Returns the complete ia64_sal_retval because some calls return more than just a status
- * value.
- */
-static inline struct ia64_sal_retval
-ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout, u64 rz_always)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val,
- timeout, rz_always, 0, 0);
- return isrv;
-}
-
-/* Read from PCI configuration space */
-static inline s64
-ia64_sal_pci_config_read (u64 pci_config_addr, int type, u64 size, u64 *value)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size, type, 0, 0, 0, 0);
- if (value)
- *value = isrv.v0;
- return isrv.status;
-}
-
-/* Write to PCI configuration space */
-static inline s64
-ia64_sal_pci_config_write (u64 pci_config_addr, int type, u64 size, u64 value)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value,
- type, 0, 0, 0);
- return isrv.status;
-}
-
-/*
- * Register physical addresses of locations needed by SAL when SAL procedures are invoked
- * in virtual mode.
- */
-static inline s64
-ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr,
- 0, 0, 0, 0, 0);
- return isrv.status;
-}
-
-/*
- * Register software dependent code locations within SAL. These locations are handlers or
- * entry points where SAL will pass control for the specified event. These event handlers
- * are for the boot rendezvous, MCA and INIT scenarios.
- */
-static inline s64
-ia64_sal_set_vectors (u64 vector_type,
- u64 handler_addr1, u64 gp1, u64 handler_len1,
- u64 handler_addr2, u64 gp2, u64 handler_len2)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SAL_SET_VECTORS, vector_type,
- handler_addr1, gp1, handler_len1,
- handler_addr2, gp2, handler_len2);
-
- return isrv.status;
-}
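
/*
 * Editorial sketch: registering an OS MCA entry point through the
 * SAL_VECTOR_OS_MCA encoding above.  The physical entry address, GP
 * and handler length are supplied by a hypothetical caller; the
 * second handler slot is left unused here.
 */
static inline s64
register_os_mca_handler(u64 entry_pa, u64 gp_pa, u64 len)
{
	return ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				    entry_pa, gp_pa, len,
				    0, 0, 0);
}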
-
-/* Update the contents of PAL block in the non-volatile storage device */
-static inline s64
-ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
- u64 *error_code, u64 *scratch_buf_size_needed)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size,
- 0, 0, 0, 0);
- if (error_code)
- *error_code = isrv.v0;
- if (scratch_buf_size_needed)
- *scratch_buf_size_needed = isrv.v1;
- return isrv.status;
-}
-
-/* Get physical processor die mapping in the platform. */
-static inline s64
-ia64_sal_physical_id_info(u16 *splid)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
- if (splid)
- *splid = isrv.v0;
- return isrv.status;
-}
-
-extern unsigned long sal_platform_features;
-
-extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
-
-struct sal_ret_values {
- long r8; long r9; long r10; long r11;
-};
-
-#define IA64_SAL_OEMFUNC_MIN 0x02000000
-#define IA64_SAL_OEMFUNC_MAX 0x03ffffff
-
-extern int ia64_sal_oemcall(struct ia64_sal_retval *, u64, u64, u64, u64, u64,
- u64, u64, u64);
-extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
- u64, u64, u64, u64, u64);
-extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
- u64, u64, u64, u64, u64);
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * System Abstraction Layer Specification
- * Section 3.2.5.1: OS_BOOT_RENDEZ to SAL return State.
- * Note: region regs are stored first in head.S _start. Hence they must
- * stay up front.
- */
-struct sal_to_os_boot {
- u64 rr[8]; /* Region Registers */
- u64 br[6]; /* br0: return addr into SAL boot rendez routine */
- u64 gr1; /* SAL:GP */
- u64 gr12; /* SAL:SP */
- u64 gr13; /* SAL: Task Pointer */
- u64 fpsr;
- u64 pfs;
- u64 rnat;
- u64 unat;
- u64 bspstore;
- u64 dcr; /* Default Control Register */
- u64 iva;
- u64 pta;
- u64 itv;
- u64 pmv;
- u64 cmcv;
- u64 lrr[2];
- u64 gr[4];
- u64 pr; /* Predicate registers */
- u64 lc; /* Loop Count */
- struct ia64_fpreg fp[20];
-};
-
-/*
- * Global array allocated for NR_CPUS at boot time
- */
-extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
-
-extern void ia64_jump_to_sal(struct sal_to_os_boot *);
-#endif
-
-extern void ia64_sal_handler_init(void *entry_point, void *gpval);
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IA64_SAL_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sections.h b/xen/include/asm-ia64/linux-xen/asm/sections.h
deleted file mode 100644
index a6334c6b94..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sections.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef _ASM_IA64_SECTIONS_H
-#define _ASM_IA64_SECTIONS_H
-
-/*
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <asm-generic/sections.h>
-
-extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
-#ifdef XEN
-#ifdef CONFIG_SMP
-extern char __cpu0_per_cpu[];
-#endif
-#endif
-extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
-extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
-extern char __start_gate_section[];
-extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
-extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
-extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
-extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
-extern char __start_unwind[], __end_unwind[];
-extern char __start_ivt_text[], __end_ivt_text[];
-
-#endif /* _ASM_IA64_SECTIONS_H */
-
diff --git a/xen/include/asm-ia64/linux-xen/asm/smp.h b/xen/include/asm-ia64/linux-xen/asm/smp.h
deleted file mode 100644
index ae4fcc2623..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/smp.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * SMP Support
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * (c) Copyright 2001-2003, 2005 Hewlett-Packard Development Company, L.P.
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- */
-#ifndef _ASM_IA64_SMP_H
-#define _ASM_IA64_SMP_H
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/threads.h>
-#include <linux/kernel.h>
-#include <linux/cpumask.h>
-
-#include <asm/bitops.h>
-#include <asm/io.h>
-#include <asm/param.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-
-static inline unsigned int
-ia64_get_lid (void)
-{
- union {
- struct {
- unsigned long reserved : 16;
- unsigned long eid : 8;
- unsigned long id : 8;
- unsigned long ignored : 32;
- } f;
- unsigned long bits;
- } lid;
-
- lid.bits = ia64_getreg(_IA64_REG_CR_LID);
- return lid.f.id << 8 | lid.f.eid;
-}
-
-#ifdef CONFIG_SMP
-
-#define XTP_OFFSET 0x1e0008
-
-#define SMP_IRQ_REDIRECTION (1 << 0)
-#define SMP_IPI_REDIRECTION (1 << 1)
-
-#ifdef XEN
-#define raw_smp_processor_id() (current->processor)
-#else
-#define raw_smp_processor_id() (current_thread_info()->cpu)
-#endif
-
-extern struct smp_boot_data {
- int cpu_count;
- int cpu_phys_id[NR_CPUS];
-} smp_boot_data __initdata;
-
-extern char no_int_routing __devinitdata;
-
-extern cpumask_t cpu_online_map;
-#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-
-DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
-DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
-extern int smp_num_siblings;
-extern int smp_num_cpucores;
-extern void __iomem *ipi_base_addr;
-extern unsigned char smp_int_redirect;
-
-extern volatile int ia64_cpu_to_sapicid[];
-#define cpu_physical_id(i) ia64_cpu_to_sapicid[i]
-
-extern unsigned long ap_wakeup_vector;
-
-/*
- * Function to map hard smp processor id to logical id. Slow, so don't use this in
- * performance-critical code.
- */
-static inline int
-cpu_logical_id (int cpuid)
-{
- int i;
-
- for (i = 0; i < NR_CPUS; ++i)
- if (cpu_physical_id(i) == cpuid)
- break;
- return i;
-}
-
-/*
- * XTP control functions:
- * min_xtp : route all interrupts to this CPU
- * normal_xtp: nominal XTP value
- * max_xtp : never deliver interrupts to this CPU.
- */
-
-static inline void
-min_xtp (void)
-{
- if (smp_int_redirect & SMP_IRQ_REDIRECTION)
- writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
-}
-
-static inline void
-normal_xtp (void)
-{
- if (smp_int_redirect & SMP_IRQ_REDIRECTION)
- writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
-}
-
-static inline void
-max_xtp (void)
-{
- if (smp_int_redirect & SMP_IRQ_REDIRECTION)
- writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
-}
-
-#define hard_smp_processor_id() ia64_get_lid()
-
-/* Upping and downing of CPUs */
-extern void cpu_die (void) __attribute__ ((noreturn));
-extern void __init smp_build_cpu_map(void);
-
-extern void __init init_smp_config (void);
-extern void smp_do_timer (struct pt_regs *regs);
-
-extern int smp_call_function_single (int cpuid, void (*func) (void *info),
- void *info, int wait);
-extern void smp_send_reschedule (int cpu);
-#ifdef XEN
-extern void lock_ipi_calllock(unsigned long *flags);
-extern void unlock_ipi_calllock(unsigned long flags);
-#else
-extern void lock_ipi_calllock(void);
-extern void unlock_ipi_calllock(void);
-#endif
-extern void identify_siblings (struct cpuinfo_ia64 *);
-
-#else
-
-#define cpu_logical_id(i) 0
-#define cpu_physical_id(i) ia64_get_lid()
-
-#endif /* CONFIG_SMP */
-#endif /* _ASM_IA64_SMP_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/README.origin b/xen/include/asm-ia64/linux-xen/asm/sn/README.origin
deleted file mode 100644
index ba80c66319..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/README.origin
+++ /dev/null
@@ -1,17 +0,0 @@
-# Source files in this directory are near-identical copies of linux-2.6.19
-# files:
-
-# NOTE: ALL changes to these files should be clearly marked
-# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
-# easily updated to future versions of the corresponding Linux files.
-
-addrs.h -> linux/include/asm-ia64/sn/addrs.h
-arch.h -> linux/include/asm-ia64/sn/arch.h
-hubdev.h -> linux/arch/ia64/sn/include/xtalk/hubdev.h
-intr.h -> linux/include/asm-ia64/sn/intr.h
-io.h -> linux/include/asm-ia64/sn/io.h
-nodepda.h -> linux/include/asm-ia64/sn/nodepda.h
-pcibr_provider.h -> linux/include/asm-ia64/sn/pcibr_provider.h
-pcidev.h -> linux/include/asm-ia64/sn/pcidev.h
-rw_mmr.h -> linux/include/asm-ia64/sn/rw_mmr.h
-types.h -> linux/include/asm-ia64/sn/types.h
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h b/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h
deleted file mode 100644
index 531a880a7b..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_ADDRS_H
-#define _ASM_IA64_SN_ADDRS_H
-
-#include <asm/percpu.h>
-#include <asm/sn/types.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/pda.h>
-
-/*
- * Memory/SHUB Address Format:
- * +-+---------+--+--------------+
- * |0| NASID |AS| NodeOffset |
- * +-+---------+--+--------------+
- *
- * NASID: (low NASID bit is 0) Memory and SHUB MMRs
- * AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0
- * 00: Local Resources and MMR space
- * Top bit of NodeOffset
- * 0: Local resources space
- * node id:
- * 0: IA64/NT compatibility space
- * 2: Local MMR Space
- * 4: Local memory, regardless of local node id
- * 1: Global MMR space
- * 01: GET space.
- * 10: AMO space.
- * 11: Cacheable memory space.
- *
- * NodeOffset: byte offset
- *
- *
- * TIO address format:
- * +-+----------+--+--------------+
- * |0| NASID |AS| Nodeoffset |
- * +-+----------+--+--------------+
- *
- * NASID: (low NASID bit is 1) TIO
- * AS: 2-bit Chiplet Identifier
- * 00: TIO LB (Indicates TIO MMR access.)
- * 01: TIO ICE (indicates coretalk space access.)
- *
- * NodeOffset: top bit must be set.
- *
- *
- * Note that in both of the above address formats, the low
- * NASID bit indicates if the reference is to the SHUB or TIO MMRs.
- */
-
-
-/*
- * Define basic shift & mask constants for manipulating NASIDs and AS values.
- */
-#define NASID_BITMASK (sn_hub_info->nasid_bitmask)
-#define NASID_SHIFT (sn_hub_info->nasid_shift)
-#define AS_SHIFT (sn_hub_info->as_shift)
-#define AS_BITMASK 0x3UL
-
-#define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT)
-#define AS_MASK ((u64)AS_BITMASK << AS_SHIFT)
-
-
-/*
- * AS values. These are the same on both SHUB1 & SHUB2.
- */
-#define AS_GET_VAL 1UL
-#define AS_AMO_VAL 2UL
-#define AS_CAC_VAL 3UL
-#define AS_GET_SPACE (AS_GET_VAL << AS_SHIFT)
-#define AS_AMO_SPACE (AS_AMO_VAL << AS_SHIFT)
-#define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT)
-
-
-/*
- * Virtual Mode Local & Global MMR space.
- */
-#define SH1_LOCAL_MMR_OFFSET 0x8000000000UL
-#define SH2_LOCAL_MMR_OFFSET 0x0200000000UL
-#define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
-#define LOCAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
-#define LOCAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
-
-#define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL
-#define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL
-#define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
-#define GLOBAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
-
-/*
- * Physical mode addresses
- */
-#define GLOBAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
-
-
-/*
- * Clear region & AS bits.
- */
-#define TO_PHYS_MASK (~(RGN_BITS | AS_MASK))
-
-
-/*
- * Misc NASID manipulation.
- */
-#define NASID_SPACE(n) ((u64)(n) << NASID_SHIFT)
-#define REMOTE_ADDR(n,a) (NASID_SPACE(n) | (a))
-#define NODE_OFFSET(x) ((x) & (NODE_ADDRSPACE_SIZE - 1))
-#define NODE_ADDRSPACE_SIZE (1UL << AS_SHIFT)
-#define NASID_GET(x) (int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK)
-#define LOCAL_MMR_ADDR(a) (LOCAL_MMR_SPACE | (a))
-#define GLOBAL_MMR_ADDR(n,a) (GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a))
-#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
-#define GLOBAL_CAC_ADDR(n,a) (CAC_BASE | REMOTE_ADDR(n,a))
-#define CHANGE_NASID(n,x) ((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
-#define IS_TIO_NASID(n) ((n) & 1)
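
/*
 * Editorial sketch: composing a global-MMR reference from a NASID and
 * a register offset with the macros above.  The offset a caller would
 * pass is hypothetical.
 */
static inline volatile u64 *example_global_mmr(int nasid, u64 mmr_offset)
{
	/* uncached global MMR space | NASID bits | node offset */
	return (volatile u64 *)GLOBAL_MMR_ADDR(nasid, mmr_offset);
}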
-
-
-/* non-II mmr's start at top of big window space (4G) */
-#define BWIN_TOP 0x0000000100000000UL
-
-/*
- * general address defines
- */
-#define CAC_BASE (PAGE_OFFSET | AS_CAC_SPACE)
-#define AMO_BASE (__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
-#define AMO_PHYS_BASE (RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
-#define GET_BASE (PAGE_OFFSET | AS_GET_SPACE)
-
-/*
- * Convert Memory addresses between various addressing modes.
- */
-#define TO_PHYS(x) (TO_PHYS_MASK & (x))
-#define TO_CAC(x) (CAC_BASE | TO_PHYS(x))
-#if defined(CONFIG_SGI_SN) || defined(XEN)
-#define TO_AMO(x) (AMO_BASE | TO_PHYS(x))
-#define TO_GET(x) (GET_BASE | TO_PHYS(x))
-#else
-#define TO_AMO(x) ({ BUG(); x; })
-#define TO_GET(x) ({ BUG(); x; })
-#endif
-
-/*
- * Convert from processor physical address to II/TIO physical address:
- * II - squeeze out the AS bits
- * TIO- requires a chiplet id in bits 38-39. For DMA to memory,
- * the chiplet id is zero. If we implement TIO-TIO dma, we might need
- * to insert a chiplet id into this macro. However, it is our belief
- * right now that this chiplet id will be ICE, which is also zero.
- */
-#define SH1_TIO_PHYS_TO_DMA(x) \
- ((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
-
-#define SH2_NETWORK_BANK_OFFSET(x) \
- ((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1))
-
-#define SH2_NETWORK_BANK_SELECT(x) \
- ((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4))) \
- >> (sn_hub_info->nasid_shift - 4)) << 36)
-
-#define SH2_NETWORK_ADDRESS(x) \
- (SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x))
-
-#define SH2_TIO_PHYS_TO_DMA(x) \
- (((u64)(NASID_GET(x)) << 40) | SH2_NETWORK_ADDRESS(x))
-
-#define PHYS_TO_TIODMA(x) \
- (is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x))
-
-#define PHYS_TO_DMA(x) \
- ((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
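
/*
 * Editorial sketch: producing a bus (DMA) address for a memory buffer,
 * following the II "squeeze out the AS bits" rule described above.
 * Deciding whether the target is a TIO is left to the caller here.
 */
static inline u64 example_dma_addr(u64 paddr, int tio_target)
{
	return tio_target ? PHYS_TO_TIODMA(paddr) : PHYS_TO_DMA(paddr);
}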
-
-
-/*
- * Macros to test for address type.
- */
-#define IS_AMO_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
-#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
-
-
-/*
- * The following definitions pertain to the IO special address
- * space. They define the location of the big and little windows
- * of any given node.
- */
-#define BWIN_SIZE_BITS 29 /* big window size: 512M */
-#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */
-#define NODE_SWIN_BASE(n, w) ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \
- : RAW_NODE_SWIN_BASE(n, w))
-#define TIO_SWIN_BASE(n, w) (TIO_IO_BASE(n) + \
- ((u64) (w) << TIO_SWIN_SIZE_BITS))
-#define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n))
-#define TIO_IO_BASE(n) (__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
-#define BWIN_SIZE (1UL << BWIN_SIZE_BITS)
-#define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE)
-#define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
-#define RAW_NODE_SWIN_BASE(n, w) (NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
-#define BWIN_WIDGET_MASK 0x7
-#define BWIN_WINDOWNUM(x) (((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
-#define SH1_IS_BIG_WINDOW_ADDR(x) ((x) & BWIN_TOP)
-
-#define TIO_BWIN_WINDOW_SELECT_MASK 0x7
-#define TIO_BWIN_WINDOWNUM(x) (((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
-
-#define TIO_HWIN_SHIFT_BITS 33
-#define TIO_HWIN(x) (NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS)
-
-/*
- * The following definitions pertain to the IO special address
- * space. They define the location of the big and little windows
- * of any given node.
- */
-
-#define SWIN_SIZE_BITS 24
-#define SWIN_WIDGET_MASK 0xF
-
-#define TIO_SWIN_SIZE_BITS 28
-#define TIO_SWIN_SIZE (1UL << TIO_SWIN_SIZE_BITS)
-#define TIO_SWIN_WIDGET_MASK 0x3
-
-/*
- * Convert smallwindow address to xtalk address.
- *
- * 'addr' can be physical or virtual address, but will be converted
- * to Xtalk address in the range 0 -> SWINZ_SIZEMASK
- */
-#define SWIN_WIDGETNUM(x) (((x) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
-#define TIO_SWIN_WIDGETNUM(x) (((x) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
-
-
-/*
- * The following macros produce the correct base virtual address for
- * the hub registers. The REMOTE_HUB_* macro produce
- * the address for the specified hub's registers. The intent is
- * that the appropriate PI, MD, NI, or II register would be substituted
- * for x.
- *
- * WARNING:
- * When certain Hub chip workarounds are defined, it's not sufficient
- * to dereference the *_HUB_ADDR() macros. You should instead use
- * HUB_L() and HUB_S() if you must deal with pointers to hub registers.
- * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
- * They're always safe.
- */
-/* Shub1 TIO & MMR addressing macros */
-#define SH1_TIO_IOSPACE_ADDR(n,x) \
- GLOBAL_MMR_ADDR(n,x)
-
-#define SH1_REMOTE_BWIN_MMR(n,x) \
- GLOBAL_MMR_ADDR(n,x)
-
-#define SH1_REMOTE_SWIN_MMR(n,x) \
- (NODE_SWIN_BASE(n,1) + 0x800000UL + (x))
-
-#define SH1_REMOTE_MMR(n,x) \
- (SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) : \
- SH1_REMOTE_SWIN_MMR(n,x))
-
-/* Shub1 TIO & MMR addressing macros */
-#define SH2_TIO_IOSPACE_ADDR(n,x) \
- ((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2)))
-
-#define SH2_REMOTE_MMR(n,x) \
- GLOBAL_MMR_ADDR(n,x)
-
-
-/* TIO & MMR addressing macros that work on both shub1 & shub2 */
-#define TIO_IOSPACE_ADDR(n,x) \
- ((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) : \
- SH2_TIO_IOSPACE_ADDR(n,x)))
-
-#define SH_REMOTE_MMR(n,x) \
- (is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x))
-
-#define REMOTE_HUB_ADDR(n,x) \
- (IS_TIO_NASID(n) ? ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) : \
- ((volatile u64*)SH_REMOTE_MMR(n,x)))
-
-
-#define HUB_L(x) (*((volatile typeof(*x) *)x))
-#define HUB_S(x,d) (*((volatile typeof(*x) *)x) = (d))
-
-#define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a)))
-#define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
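
/*
 * Editorial sketch of the access pattern recommended by the WARNING
 * above: read-modify-write a remote hub register through
 * REMOTE_HUB_L()/REMOTE_HUB_S() instead of dereferencing
 * REMOTE_HUB_ADDR() directly.  The register offset is made up.
 */
static inline void example_remote_mmr_set_bits(int nasid, u64 mmr, u64 bits)
{
	u64 v = REMOTE_HUB_L(nasid, mmr);	/* workaround-safe load */

	REMOTE_HUB_S(nasid, mmr, v | bits);	/* workaround-safe store */
}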
-
-/*
- * Coretalk address breakdown
- */
-#define CTALK_NASID_SHFT 40
-#define CTALK_NASID_MASK (0x3FFFULL << CTALK_NASID_SHFT)
-#define CTALK_CID_SHFT 38
-#define CTALK_CID_MASK (0x3ULL << CTALK_CID_SHFT)
-#define CTALK_NODE_OFFSET 0x3FFFFFFFFF
-
-#endif /* _ASM_IA64_SN_ADDRS_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/arch.h b/xen/include/asm-ia64/linux-xen/asm/sn/arch.h
deleted file mode 100644
index a159c728b9..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/arch.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * SGI specific setup.
- *
- * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
- * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
- */
-#ifndef _ASM_IA64_SN_ARCH_H
-#define _ASM_IA64_SN_ARCH_H
-
-#ifndef XEN
-#include <linux/numa.h>
-#include <asm/types.h>
-#include <asm/percpu.h>
-#include <asm/sn/types.h>
-#endif
-#include <asm/sn/sn_cpuid.h>
-
-/*
- * This is the maximum number of NUMALINK nodes that can be part of a single
- * SSI kernel. This number includes C-bricks, M-bricks, and TIOs. Nodes in
- * remote partitions are NOT included in this number.
- * The number of compact nodes cannot exceed size of a coherency domain.
- * The purpose of this define is to specify a node count that includes
- * all C/M/TIO nodes in an SSI system.
- *
- * SGI systems can currently support up to 256 C/M nodes plus additional TIO nodes.
- *
- * Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade
- * to ACPI3.0, this limit will be removed. The notion of "compact nodes"
- * should be deleted and TIOs should be included in MAX_NUMNODES.
- */
-#define MAX_TIO_NODES MAX_NUMNODES
-#define MAX_COMPACT_NODES (MAX_NUMNODES + MAX_TIO_NODES)
-
-/*
- * Maximum number of nodes in all partitions and in all coherency domains.
- * This is the total number of nodes accessible in the numalink fabric. It
- * includes all C & M bricks, plus all TIOs.
- *
- * This value is also the value of the maximum number of NASIDs in the numalink
- * fabric.
- */
-#define MAX_NUMALINK_NODES 16384
-
-/*
- * The following defines attributes of the HUB chip. These attributes are
- * frequently referenced. They are kept in the per-cpu data areas of each cpu.
- * They are kept together in a struct to minimize cache misses.
- */
-struct sn_hub_info_s {
- u8 shub2;
- u8 nasid_shift;
- u8 as_shift;
- u8 shub_1_1_found;
- u16 nasid_bitmask;
-};
-DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
-#define sn_hub_info (&__get_cpu_var(__sn_hub_info))
-#ifndef XEN
-#define is_shub2() (sn_hub_info->shub2)
-#define is_shub1() (sn_hub_info->shub2 == 0)
-#else
-#define is_shub2() 0
-#define is_shub1() 1
-#endif
-
-/*
- * Use this macro to test if shub 1.1 wars should be enabled
- */
-#define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found)
-
-
-/*
- * Compact node ID to nasid mappings kept in the per-cpu data areas of each
- * cpu.
- */
-DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
-#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
-
-#ifndef XEN
-extern u8 sn_partition_id;
-extern u8 sn_system_size;
-extern u8 sn_sharing_domain_size;
-extern u8 sn_region_size;
-
-extern void sn_flush_all_caches(long addr, long bytes);
-#endif
-#endif /* _ASM_IA64_SN_ARCH_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h b/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h
deleted file mode 100644
index 96bbfe2a1f..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
-#define _ASM_IA64_SN_XTALK_HUBDEV_H
-
-#ifndef XEN
-#include "xtalk/xwidgetdev.h"
-#else
-#include <asm/sn/xwidgetdev.h>
-#endif
-
-#define HUB_WIDGET_ID_MAX 0xf
-#define DEV_PER_WIDGET (2*2*8)
-#define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */
-#define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1)
-#define IIO_ITTE_WIDGET_SHIFT 8
-
-#define IIO_ITTE_WIDGET(itte) \
- (((itte) >> IIO_ITTE_WIDGET_SHIFT) & IIO_ITTE_WIDGET_MASK)
-
-/*
- * Use the top big window as a surrogate for the first small window
- */
-#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
-#define IIO_NUM_ITTES 7
-#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
-
-/* This struct is shared between the PROM and the kernel.
- * Changes to this struct will require corresponding changes to the kernel.
- */
-struct sn_flush_device_common {
- int sfdl_bus;
- int sfdl_slot;
- int sfdl_pin;
- struct common_bar_list {
- unsigned long start;
- unsigned long end;
- } sfdl_bar_list[6];
- unsigned long sfdl_force_int_addr;
- unsigned long sfdl_flush_value;
- volatile unsigned long *sfdl_flush_addr;
- u32 sfdl_persistent_busnum;
- u32 sfdl_persistent_segment;
- struct pcibus_info *sfdl_pcibus_info;
-};
-
-/* This struct is kernel only and is not used by the PROM */
-struct sn_flush_device_kernel {
- spinlock_t sfdl_flush_lock;
- struct sn_flush_device_common *common;
-};
-
-/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
- * for older official PROMs to function on the new kernel base. This struct
- * will be removed when the next official PROM release occurs. */
-
-struct sn_flush_device_war {
- struct sn_flush_device_common common;
- u32 filler; /* older PROMs expect the default size of a spinlock_t */
-};
-
-/*
- * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
- */
-struct sn_flush_nasid_entry {
- struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num
- u64 iio_itte[8];
-};
-
-struct hubdev_info {
- geoid_t hdi_geoid;
- short hdi_nasid;
- short hdi_peer_nasid; /* Dual Porting Peer */
-
- struct sn_flush_nasid_entry hdi_flush_nasid_list;
- struct xwidget_info hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1];
-
-
- void *hdi_nodepda;
- void *hdi_node_vertex;
- u32 max_segment_number;
- u32 max_pcibus_number;
-};
-
-extern void hubdev_init_node(nodepda_t *, cnodeid_t);
-extern void hub_error_init(struct hubdev_info *);
-extern void ice_error_init(struct hubdev_info *);
-
-
-#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/intr.h b/xen/include/asm-ia64/linux-xen/asm/sn/intr.h
deleted file mode 100644
index 5e7ef5c993..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/intr.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_INTR_H
-#define _ASM_IA64_SN_INTR_H
-
-#ifndef XEN
-#include <linux/rcupdate.h>
-#else
-#include <linux/list.h>
-#endif
-#include <asm/sn/types.h>
-
-#define SGI_UART_VECTOR 0xe9
-
-/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
-#define SGI_XPC_ACTIVATE 0x30
-#define SGI_II_ERROR 0x31
-#define SGI_XBOW_ERROR 0x32
-#define SGI_PCIASIC_ERROR 0x33
-#define SGI_ACPI_SCI_INT 0x34
-#define SGI_TIOCA_ERROR 0x35
-#define SGI_TIO_ERROR 0x36
-#define SGI_TIOCX_ERROR 0x37
-#define SGI_MMTIMER_VECTOR 0x38
-#define SGI_XPC_NOTIFY 0xe7
-
-#define IA64_SN2_FIRST_DEVICE_VECTOR 0x3c
-#define IA64_SN2_LAST_DEVICE_VECTOR 0xe6
-
-#define SN2_IRQ_RESERVED 0x1
-#define SN2_IRQ_CONNECTED 0x2
-#define SN2_IRQ_SHARED 0x4
-
-// The SN PROM irq struct
-struct sn_irq_info {
- struct sn_irq_info *irq_next; /* deprecated DO NOT USE */
- short irq_nasid; /* Nasid IRQ is assigned to */
- int irq_slice; /* slice IRQ is assigned to */
- int irq_cpuid; /* kernel logical cpuid */
- int irq_irq; /* the IRQ number */
- int irq_int_bit; /* Bridge interrupt pin */
- /* <0 means MSI */
- u64 irq_xtalkaddr; /* xtalkaddr IRQ is sent to */
- int irq_bridge_type;/* pciio asic type (pciio.h) */
- void *irq_bridge; /* bridge generating irq */
- void *irq_pciioinfo; /* associated pciio_info_t */
- int irq_last_intr; /* For Shub lb lost intr WAR */
- int irq_cookie; /* unique cookie */
- int irq_flags; /* flags */
- int irq_share_cnt; /* num devices sharing IRQ */
- struct list_head list; /* list of sn_irq_info structs */
-#ifndef XEN
- struct rcu_head rcu; /* rcu callback list */
-#endif
-};
-
-extern void sn_send_IPI_phys(int, long, int, int);
-extern u64 sn_intr_alloc(nasid_t, int,
- struct sn_irq_info *,
- int, nasid_t, int);
-extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
-extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
-extern struct list_head **sn_irq_lh;
-
-#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
-
-#endif /* _ASM_IA64_SN_INTR_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/io.h b/xen/include/asm-ia64/linux-xen/asm/sn/io.h
deleted file mode 100644
index 78585a22cf..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/io.h
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_SN_IO_H
-#define _ASM_SN_IO_H
-#include <linux/compiler.h>
-#include <asm/intrinsics.h>
-
-extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
-extern void __sn_mmiowb(void); /* Forward definition */
-
-extern int num_cnodes;
-
-#define __sn_mf_a() ia64_mfa()
-
-#ifdef XEN
-/*
- * Xen doesn't deal with any PIC devices directly, it's all handled in dom0
- */
-#define sn_dma_flush(foo) do {} while(0)
-#else
-extern void sn_dma_flush(unsigned long);
-#endif
-
-#define __sn_inb ___sn_inb
-#define __sn_inw ___sn_inw
-#define __sn_inl ___sn_inl
-#define __sn_outb ___sn_outb
-#define __sn_outw ___sn_outw
-#define __sn_outl ___sn_outl
-#define __sn_readb ___sn_readb
-#define __sn_readw ___sn_readw
-#define __sn_readl ___sn_readl
-#define __sn_readq ___sn_readq
-#define __sn_readb_relaxed ___sn_readb_relaxed
-#define __sn_readw_relaxed ___sn_readw_relaxed
-#define __sn_readl_relaxed ___sn_readl_relaxed
-#define __sn_readq_relaxed ___sn_readq_relaxed
-
-/*
- * Convenience macros for setting/clearing bits using the above accessors
- */
-
-#define __sn_setq_relaxed(addr, val) \
- writeq((__sn_readq_relaxed(addr) | (val)), (addr))
-#define __sn_clrq_relaxed(addr, val) \
- writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
-
-/*
- * The following routines are SN Platform specific, called when a
- * reference is made to the inX/outX set of macros.  The SN Platform
- * inX macros ensure that posted DMA writes on the Bridge are flushed.
- *
- * The routines should be self-explanatory.
- */
-
-static inline unsigned int
-___sn_inb (unsigned long port)
-{
- volatile unsigned char *addr;
- unsigned char ret = -1;
-
- if ((addr = sn_io_addr(port))) {
- ret = *addr;
- __sn_mf_a();
- sn_dma_flush((unsigned long)addr);
- }
- return ret;
-}
-
-static inline unsigned int
-___sn_inw (unsigned long port)
-{
- volatile unsigned short *addr;
- unsigned short ret = -1;
-
- if ((addr = sn_io_addr(port))) {
- ret = *addr;
- __sn_mf_a();
- sn_dma_flush((unsigned long)addr);
- }
- return ret;
-}
-
-static inline unsigned int
-___sn_inl (unsigned long port)
-{
- volatile unsigned int *addr;
- unsigned int ret = -1;
-
- if ((addr = sn_io_addr(port))) {
- ret = *addr;
- __sn_mf_a();
- sn_dma_flush((unsigned long)addr);
- }
- return ret;
-}
-
-static inline void
-___sn_outb (unsigned char val, unsigned long port)
-{
- volatile unsigned char *addr;
-
- if ((addr = sn_io_addr(port))) {
- *addr = val;
- __sn_mmiowb();
- }
-}
-
-static inline void
-___sn_outw (unsigned short val, unsigned long port)
-{
- volatile unsigned short *addr;
-
- if ((addr = sn_io_addr(port))) {
- *addr = val;
- __sn_mmiowb();
- }
-}
-
-static inline void
-___sn_outl (unsigned int val, unsigned long port)
-{
- volatile unsigned int *addr;
-
- if ((addr = sn_io_addr(port))) {
- *addr = val;
- __sn_mmiowb();
- }
-}
-
-/*
- * The following routines are SN Platform specific, called when a
- * reference is made to the readX/writeX set of macros.  The SN Platform
- * readX macros ensure that posted DMA writes on the Bridge are flushed.
- *
- * The routines should be self-explanatory.
- */
-
-static inline unsigned char
-___sn_readb (const volatile void __iomem *addr)
-{
- unsigned char val;
-
- val = *(volatile unsigned char __force *)addr;
- __sn_mf_a();
- sn_dma_flush((unsigned long)addr);
- return val;
-}
-
-static inline unsigned short
-___sn_readw (const volatile void __iomem *addr)
-{
- unsigned short val;
-
- val = *(volatile unsigned short __force *)addr;
- __sn_mf_a();
- sn_dma_flush((unsigned long)addr);
- return val;
-}
-
-static inline unsigned int
-___sn_readl (const volatile void __iomem *addr)
-{
- unsigned int val;
-
- val = *(volatile unsigned int __force *)addr;
- __sn_mf_a();
- sn_dma_flush((unsigned long)addr);
- return val;
-}
-
-static inline unsigned long
-___sn_readq (const volatile void __iomem *addr)
-{
- unsigned long val;
-
- val = *(volatile unsigned long __force *)addr;
- __sn_mf_a();
- sn_dma_flush((unsigned long)addr);
- return val;
-}
-
-/*
- * For generic and SN2 kernels, we have a set of fast-access
- * PIO macros.  These macros are provided on the SN Platform
- * because the normal inX and readX macros perform the
- * additional task of flushing posted DMA requests on the Bridge.
- *
- * These routines should be self-explanatory.
- */
-
-static inline unsigned int
-sn_inb_fast (unsigned long port)
-{
- volatile unsigned char *addr = (unsigned char *)port;
- unsigned char ret;
-
- ret = *addr;
- __sn_mf_a();
- return ret;
-}
-
-static inline unsigned int
-sn_inw_fast (unsigned long port)
-{
- volatile unsigned short *addr = (unsigned short *)port;
- unsigned short ret;
-
- ret = *addr;
- __sn_mf_a();
- return ret;
-}
-
-static inline unsigned int
-sn_inl_fast (unsigned long port)
-{
- volatile unsigned int *addr = (unsigned int *)port;
- unsigned int ret;
-
- ret = *addr;
- __sn_mf_a();
- return ret;
-}
-
-static inline unsigned char
-___sn_readb_relaxed (const volatile void __iomem *addr)
-{
- return *(volatile unsigned char __force *)addr;
-}
-
-static inline unsigned short
-___sn_readw_relaxed (const volatile void __iomem *addr)
-{
- return *(volatile unsigned short __force *)addr;
-}
-
-static inline unsigned int
-___sn_readl_relaxed (const volatile void __iomem *addr)
-{
- return *(volatile unsigned int __force *) addr;
-}
-
-static inline unsigned long
-___sn_readq_relaxed (const volatile void __iomem *addr)
-{
- return *(volatile unsigned long __force *) addr;
-}
-
-struct pci_dev;
-
-static inline int
-sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
-{
-
- if (vchan > 1) {
- return -1;
- }
-
- if (!(*addr >> 32)) /* Using a mask here would be cleaner */
- return 0; /* but this generates better code */
-
- if (vchan == 1) {
- /* Set Bit 57 */
- *addr |= (1UL << 57);
- } else {
- /* Clear Bit 57 */
- *addr &= ~(1UL << 57);
- }
-
- return 0;
-}
-
-#endif /* _ASM_SN_IO_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h b/xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h
deleted file mode 100644
index ffa4302423..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_NODEPDA_H
-#define _ASM_IA64_SN_NODEPDA_H
-
-
-#include <asm/semaphore.h>
-#include <asm/irq.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/intr.h>
-#ifndef XEN
-#include <asm/sn/bte.h>
-#endif
-
-/*
- * NUMA Node-Specific Data structures are defined in this file.
- * In particular, this is the location of the node PDA.
- * A pointer to the right node PDA is saved in each CPU PDA.
- */
-
-/*
- * Node-specific data structure.
- *
- * One of these structures is allocated on each node of a NUMA system.
- *
- * This structure provides a convenient way of keeping together
- * all per-node data structures.
- */
-struct phys_cpuid {
- short nasid;
- char subnode;
- char slice;
-};
-
-struct nodepda_s {
- void *pdinfo; /* Platform-dependent per-node info */
-
-#ifndef XEN
- /*
- * The BTEs on this node are shared by the local cpus
- */
- struct bteinfo_s bte_if[MAX_BTES_PER_NODE]; /* Virtual Interface */
- struct timer_list bte_recovery_timer;
- spinlock_t bte_recovery_lock;
-#endif
-
- /*
- * Array of pointers to the nodepdas for each node.
- */
- struct nodepda_s *pernode_pdaindr[MAX_COMPACT_NODES];
-
- /*
- * Array of physical cpu identifiers. Indexed by cpuid.
- */
- struct phys_cpuid phys_cpuid[NR_CPUS];
- spinlock_t ptc_lock ____cacheline_aligned_in_smp;
-};
-
-typedef struct nodepda_s nodepda_t;
-
-/*
- * Access Functions for node PDA.
- * Since there is one nodepda for each node, we need a convenient mechanism
- * to access these nodepdas without cluttering code with #ifdefs.
- * The next set of definitions provides this.
- * Routines are expected to use
- *
- * sn_nodepda - to access node PDA for the node on which code is running
- * NODEPDA(cnodeid) - to access node PDA for cnodeid
- */
-
-DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
-#define sn_nodepda (__get_cpu_var(__sn_nodepda))
-#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])
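
/*
 * Editorial sketch: resolving a cpu's physical location through the
 * accessors above -- sn_nodepda for the local node, NODEPDA() for any
 * compact node id.  The function itself is hypothetical.
 */
static inline short example_cpu_nasid(int cnode, int cpu)
{
	return NODEPDA(cnode)->phys_cpuid[cpu].nasid;
}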
-
-/*
- * Check if given a compact node id the corresponding node has all the
- * cpus disabled.
- */
-#define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0)
-
-#endif /* _ASM_IA64_SN_NODEPDA_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h b/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h
deleted file mode 100644
index dc953eb2f4..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
-#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
-
-#ifdef XEN
-#include <linux/spinlock.h>
-#include <linux/linux-pci.h>
-#endif
-#include <asm/sn/intr.h>
-#include <asm/sn/pcibus_provider_defs.h>
-
-/* Workarounds */
-#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
-
-#define BUSTYPE_MASK 0x1
-
-/* Macros given a pcibus structure */
-#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
-#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
- asic == PCIIO_ASIC_TYPE_TIOCP)
-#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
-
-
-/*
- * The different PCI Bridge types supported on the SGI Altix platforms
- */
-#define PCIBR_BRIDGETYPE_UNKNOWN -1
-#define PCIBR_BRIDGETYPE_PIC 2
-#define PCIBR_BRIDGETYPE_TIOCP 3
-
-/*
- * Bridge 64bit Direct Map Attributes
- */
-#define PCI64_ATTR_PREF (1ull << 59)
-#define PCI64_ATTR_PREC (1ull << 58)
-#define PCI64_ATTR_VIRTUAL (1ull << 57)
-#define PCI64_ATTR_BAR (1ull << 56)
-#define PCI64_ATTR_SWAP (1ull << 55)
-#define PCI64_ATTR_VIRTUAL1 (1ull << 54)
-
-#define PCI32_LOCAL_BASE 0
-#define PCI32_MAPPED_BASE 0x40000000
-#define PCI32_DIRECT_BASE 0x80000000
-
-#define IS_PCI32_MAPPED(x) ((u64)(x) < PCI32_DIRECT_BASE && \
- (u64)(x) >= PCI32_MAPPED_BASE)
-#define IS_PCI32_DIRECT(x) ((u64)(x) >= PCI32_MAPPED_BASE)
-
-
-/*
- * Bridge PMU Address Translation Entry Attributes
- */
-#define PCI32_ATE_V (0x1 << 0)
-#define PCI32_ATE_CO (0x1 << 1)
-#define PCI32_ATE_PREC (0x1 << 2)
-#define PCI32_ATE_MSI (0x1 << 2)
-#define PCI32_ATE_PREF (0x1 << 3)
-#define PCI32_ATE_BAR (0x1 << 4)
-#define PCI32_ATE_ADDR_SHFT 12
-
-#define MINIMAL_ATES_REQUIRED(addr, size) \
- (IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
-
-#define MINIMAL_ATE_FLAG(addr, size) \
- (MINIMAL_ATES_REQUIRED((u64)addr, size) ? 1 : 0)
-
-/* bit 29 of the pci address is the SWAP bit */
-#define ATE_SWAPSHIFT 29
-#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
-#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
-
-/*
- * I/O page size
- */
-#if PAGE_SIZE < 16384
-#define IOPFNSHIFT 12 /* 4K per mapped page */
-#else
-#define IOPFNSHIFT 14 /* 16K per mapped page */
-#endif
-
-#define IOPGSIZE (1 << IOPFNSHIFT)
-#define IOPG(x) ((x) >> IOPFNSHIFT)
-#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
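
/*
 * Editorial sketch: how many I/O pages (and hence mapping ATEs) a DMA
 * buffer spans, matching the MINIMAL_ATES_REQUIRED() test above.  With
 * 16K I/O pages, a 0x3000-byte buffer starting at page offset 0x1000
 * fits in a single page, so one ATE suffices.
 */
static inline u64 example_ates_needed(u64 addr, u64 size)
{
	/* pages covered by [addr, addr + size) */
	return IOPG(IOPGOFF(addr) + size - 1) + 1;
}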
-
-#define PCIBR_DEV_SWAP_DIR (1ull << 19)
-#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21)
-
-/*
- * PMU resources.
- */
-struct ate_resource{
- u64 *ate;
- u64 num_ate;
- u64 lowest_free_index;
-};
-
-struct pcibus_info {
- struct pcibus_bussoft pbi_buscommon; /* common header */
- u32 pbi_moduleid;
- short pbi_bridge_type;
- short pbi_bridge_mode;
-
- struct ate_resource pbi_int_ate_resource;
- u64 pbi_int_ate_size;
-
- u64 pbi_dir_xbase;
- char pbi_hub_xid;
-
- u64 pbi_devreg[8];
-
- u32 pbi_valid_devices;
- u32 pbi_enabled_devices;
-
- spinlock_t pbi_lock;
-};
-
-extern int pcibr_init_provider(void);
-extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
-extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
-extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
-extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
-
-/*
- * prototypes for the bridge asic register access routines in pcibr_reg.c
- */
-extern void pcireg_control_bit_clr(struct pcibus_info *, u64);
-extern void pcireg_control_bit_set(struct pcibus_info *, u64);
-extern u64 pcireg_tflush_get(struct pcibus_info *);
-extern u64 pcireg_intr_status_get(struct pcibus_info *);
-extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, u64);
-extern void pcireg_intr_enable_bit_set(struct pcibus_info *, u64);
-extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, u64);
-extern void pcireg_force_intr_set(struct pcibus_info *, int);
-extern u64 pcireg_wrb_flush_get(struct pcibus_info *, int);
-extern void pcireg_int_ate_set(struct pcibus_info *, int, u64);
-extern u64 __iomem * pcireg_int_ate_addr(struct pcibus_info *, int);
-extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
-extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
-extern int pcibr_ate_alloc(struct pcibus_info *, int);
-extern void pcibr_ate_free(struct pcibus_info *, int);
-extern void ate_write(struct pcibus_info *, int, int, u64);
-extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
- void *resp);
-extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
- int action, void *resp);
-extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
-#endif
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/pcidev.h b/xen/include/asm-ia64/linux-xen/asm/sn/pcidev.h
deleted file mode 100644
index 17ae495e6a..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/pcidev.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
-#define _ASM_IA64_SN_PCI_PCIDEV_H
-
-#ifdef XEN
-#include <linux/linux-pci.h>
-#else
-#include <linux/pci.h>
-#endif
-
-/*
- * In ia64, pci_dev->sysdata must be a *pci_controller. To provide access to
- * the pcidev_info structs for all devices under a controller, we extend the
- * definition of pci_controller, via sn_pci_controller, to include a list
- * of pcidev_info.
- */
-struct sn_pci_controller {
- struct pci_controller pci_controller;
- struct list_head pcidev_info;
-};
-
-#define SN_PCI_CONTROLLER(dev) ((struct sn_pci_controller *) dev->sysdata)
-
-#define SN_PCIDEV_INFO(dev) sn_pcidev_info_get(dev)
-
-#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
- (struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
-/*
- * Given a pci_bus, return the sn pcibus_bussoft struct. Note that
- * this only works for root busses, not for busses represented by PPB's.
- */
-
-#define SN_PCIBUS_BUSSOFT(pci_bus) \
- ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
-
-/*
- * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
- * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
- * due to possible PPB's in the path.
- */
-
-#define SN_PCIDEV_BUSSOFT(pci_dev) \
- (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
-
-#define SN_PCIDEV_BUSPROVIDER(pci_dev) \
- (SN_PCIDEV_INFO(pci_dev)->pdi_provider)
-
-#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
-#define PCIIO_SLOT_NONE 255
-#define PCIIO_FUNC_NONE 255
-#define PCIIO_VENDOR_ID_NONE (-1)
-
-struct pcidev_info {
- u64 pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
- u64 pdi_slot_host_handle; /* Bus and devfn Host pci_dev */
-
- struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */
- struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */
- struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
-
- struct sn_irq_info *pdi_sn_irq_info;
- struct sn_pcibus_provider *pdi_provider; /* sn pci ops */
- struct pci_dev *host_pci_dev; /* host bus link */
- struct list_head pdi_list; /* List of pcidev_info */
-};
-
-extern void sn_irq_fixup(struct pci_dev *pci_dev,
- struct sn_irq_info *sn_irq_info);
-extern void sn_irq_unfixup(struct pci_dev *pci_dev);
-extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *);
-extern void sn_pci_controller_fixup(int segment, int busnum,
- struct pci_bus *bus);
-extern void sn_bus_store_sysdata(struct pci_dev *dev);
-extern void sn_bus_free_sysdata(void);
-extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
-extern void sn_pci_fixup_slot(struct pci_dev *dev);
-extern void sn_pci_unfixup_slot(struct pci_dev *dev);
-extern void sn_irq_lh_init(void);
-#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
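For context, a minimal sketch of how the accessor macros in the header deleted above were typically chained; the helper name and NULL checks are invented for illustration and assume sn_pci_controller_fixup() has already populated the pcidev_info list:

    /* Hypothetical helper, not from the original tree. */
    #include <asm/sn/pcidev.h>

    static int sn_dev_has_provider(struct pci_dev *dev)
    {
            struct pcidev_info *info = SN_PCIDEV_INFO(dev);

            if (info == NULL)
                    return 0;
            /* SN_PCIDEV_BUSSOFT goes through pdi_host_pcidev_info,
             * since PPBs may sit between the device and the root bus. */
            return info->pdi_provider != NULL &&
                   SN_PCIDEV_BUSSOFT(dev) != NULL;
    }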
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h b/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h
deleted file mode 100644
index cccef83367..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved.
- */
-#ifndef _ASM_IA64_SN_RW_MMR_H
-#define _ASM_IA64_SN_RW_MMR_H
-
-
-/*
- * This file declares routines that access MMRs via uncached physical addresses.
- * pio_phys_read_mmr - read an MMR
- * pio_phys_write_mmr - write an MMR
- * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
- * Second MMR will be skipped if address is NULL
- *
- * Addresses passed to these routines should be uncached physical addresses
- * i.e., 0x80000....
- */
-
-
-extern long pio_phys_read_mmr(volatile long *mmr);
-extern void pio_phys_write_mmr(volatile long *mmr, long val);
-#ifndef XEN
-extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
-#else
-extern void pio_atomic_phys_write_mmrs(volatile unsigned long *mmr1, long val1, volatile unsigned long *mmr2, long val2);
-#endif
-
-#endif /* _ASM_IA64_SN_RW_MMR_H */
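The accessors declared above take uncached physical addresses; a sketch of a read-modify-write through them (the wrapper and the choice of bit are assumptions, not original code):

    /* Sketch: toggle bit 0 of an MMR via the uncached-physical accessors.
     * The caller must pass an uncached physical address (0x8000...). */
    static void example_mmr_toggle(volatile long *uncached_phys_mmr)
    {
            long v = pio_phys_read_mmr(uncached_phys_mmr);
            pio_phys_write_mmr(uncached_phys_mmr, v ^ 1L);
    }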
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/types.h b/xen/include/asm-ia64/linux-xen/asm/sn/types.h
deleted file mode 100644
index 6df228178a..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/sn/types.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (C) 1999 by Ralf Baechle
- */
-#ifndef _ASM_IA64_SN_TYPES_H
-#define _ASM_IA64_SN_TYPES_H
-
-#include <linux/types.h>
-
-typedef unsigned long cpuid_t;
-typedef signed short nasid_t; /* node id in numa-as-id space */
-typedef signed char partid_t; /* partition ID type */
-typedef unsigned int moduleid_t; /* user-visible module number type */
-typedef unsigned int cmoduleid_t; /* kernel compact module id type */
-typedef unsigned char slotid_t; /* slot (blade) within module */
-typedef unsigned char slabid_t; /* slab (asic) within slot */
-typedef u64 nic_t;
-typedef unsigned long iopaddr_t;
-#ifndef XEN
-typedef unsigned long paddr_t;
-#endif
-typedef short cnodeid_t;
-
-#endif /* _ASM_IA64_SN_TYPES_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/spinlock.h b/xen/include/asm-ia64/linux-xen/asm/spinlock.h
deleted file mode 100644
index 1653640790..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef _ASM_IA64_SPINLOCK_H
-#define _ASM_IA64_SPINLOCK_H
-
-/*
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- *
- * This file is used for SMP configurations only.
- */
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-
-#include <asm/atomic.h>
-#include <asm/bitops.h>
-#include <asm/intrinsics.h>
-#include <asm/system.h>
-
-#define DEBUG_SPINLOCK
-
-typedef struct {
- volatile unsigned int lock;
-} raw_spinlock_t;
-
-#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0 }
-
-#define _raw_spin_is_locked(x) ((x)->lock != 0)
-#define _raw_spin_unlock(x) do { barrier(); (x)->lock = 0; } while (0)
-#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-
-typedef struct {
- volatile unsigned int read_counter : 31;
- volatile unsigned int write_lock : 1;
-} raw_rwlock_t;
-#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
-
-#define _raw_read_lock(rw) \
-do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
- \
- while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
- ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
- while (*(volatile int *)__read_lock_ptr < 0) \
- cpu_relax(); \
- } \
-} while (0)
-
-#define _raw_read_unlock(rw) \
-do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
- ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
-} while (0)
-
-#ifdef ASM_SUPPORTED
-
-#define _raw_write_trylock(rw) \
-({ \
- register long result; \
- \
- __asm__ __volatile__ ( \
- "mov ar.ccv = r0\n" \
- "dep r29 = -1, r0, 31, 1;;\n" \
- "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \
- : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \
- (result == 0); \
-})
-
-#else /* !ASM_SUPPORTED */
-
-
-#define _raw_write_trylock(rw) \
-({ \
- __u64 ia64_val; \
- __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
- ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0); \
- (ia64_val == 0); \
-})
-
-#endif /* !ASM_SUPPORTED */
-
-#define _raw_read_trylock(rw) ({ \
- raw_rwlock_t *__read_lock_ptr = (rw); \
- int orig = ia64_fetchadd(1, (int *) __read_lock_ptr, acq); \
- \
- if (unlikely(orig < 0)) \
- ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
- (orig >= 0); \
-})
-
-#define _raw_write_unlock(x) \
-({ \
- smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \
- clear_bit(31, (x)); \
-})
-
-#define _raw_rw_is_locked(x) (*(int *)(x) != 0)
-#define _raw_rw_is_write_locked(x) (test_bit(31, (x)))
-
-#endif /* _ASM_IA64_SPINLOCK_H */
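For reference, a sketch of the reader side of the fetchadd-based rwlock above: readers bump the 31-bit counter, a writer sets bit 31 and drives the word negative, and a reader that observes a negative value backs out and spins. The function is illustrative only:

    static raw_rwlock_t example_lock = _RAW_RW_LOCK_UNLOCKED;

    static unsigned long read_shared(unsigned long *datum)
    {
            unsigned long v;

            _raw_read_lock(&example_lock);   /* ia64_fetchadd(1, ..., acq) */
            v = *datum;
            _raw_read_unlock(&example_lock); /* ia64_fetchadd(-1, ..., rel) */
            return v;
    }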
diff --git a/xen/include/asm-ia64/linux-xen/asm/system.h b/xen/include/asm-ia64/linux-xen/asm/system.h
deleted file mode 100644
index f749587493..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/system.h
+++ /dev/null
@@ -1,308 +0,0 @@
-#ifndef _ASM_IA64_SYSTEM_H
-#define _ASM_IA64_SYSTEM_H
-
-/*
- * System defines. Note that this is included both from .c and .S
- * files, so it must contain only defines, not any C code. This is based
- * on information published in the Processor Abstraction Layer
- * and the System Abstraction Layer manual.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- */
-#include <linux/config.h>
-
-#include <asm/kregs.h>
-#include <asm/page.h>
-#ifndef XEN
-#include <asm/pal.h>
-#endif
-#include <asm/percpu.h>
-
-#ifndef XEN
-#define GATE_ADDR __IA64_UL_CONST(0xa000000000000000)
-/*
- * 0xa000000000000000+2*PERCPU_PAGE_SIZE
- * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
- */
-#define KERNEL_START __IA64_UL_CONST(0xa000000100000000)
-#define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
-#endif
-
-#ifndef __ASSEMBLY__
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct pci_vector_struct {
- __u16 segment; /* PCI Segment number */
- __u16 bus; /* PCI Bus number */
- __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
- __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
- __u32 irq; /* IRQ assigned */
-};
-
-extern struct ia64_boot_param {
- __u64 command_line; /* physical address of command line arguments */
- __u64 efi_systab; /* physical address of EFI system table */
- __u64 efi_memmap; /* physical address of EFI memory map */
- __u64 efi_memmap_size; /* size of EFI memory map */
- __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */
- __u32 efi_memdesc_version; /* memory descriptor version */
- struct {
- __u16 num_cols; /* number of columns on console output device */
- __u16 num_rows; /* number of rows on console output device */
- __u16 orig_x; /* cursor's x position */
- __u16 orig_y; /* cursor's y position */
- } console_info;
- __u64 fpswa; /* physical address of the fpswa interface */
- __u64 initrd_start;
- __u64 initrd_size;
-//for loading initrd for dom0
- __u64 domain_start; /* virtual address where the boot time domain begins */
- __u64 domain_size; /* how big is the boot domain */
-
-} *ia64_boot_param;
-
-/*
- * Macros to force memory ordering. In these descriptions, "previous"
- * and "subsequent" refer to program order; "visible" means that all
- * architecturally visible effects of a memory access have occurred
- * (at a minimum, this means the memory has been read or written).
- *
- * wmb(): Guarantees that all preceding stores to memory-
- * like regions are visible before any subsequent
- * stores and that all following stores will be
- * visible only after all previous stores.
- * rmb(): Like wmb(), but for reads.
- * mb(): wmb()/rmb() combo, i.e., all previous memory
- * accesses are visible before all subsequent
- * accesses and vice versa. This is also known as
- * a "fence."
- *
- * Note: "mb()" and its variants cannot be used as a fence to order
- * accesses to memory mapped I/O registers. For that, mf.a needs to
- * be used. However, we don't want to always use mf.a because (a)
- * it's (presumably) much slower than mf and (b) mf.a is supported for
- * sequential memory pages only.
- */
-#define mb() ia64_mf()
-#define rmb() mb()
-#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
-
-#ifdef CONFIG_SMP
-# define smp_mb() mb()
-# define smp_rmb() rmb()
-# define smp_wmb() wmb()
-# define smp_read_barrier_depends() read_barrier_depends()
-#else
-# define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-# define smp_read_barrier_depends() do { } while(0)
-#endif
-
-/*
- * XXX check on these---I suspect what Linus really wants here is
- * acquire vs release semantics but we can't discuss this stuff with
- * Linus just yet. Grrr...
- */
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
-#define set_wmb(var, value) do { (var) = (value); mb(); } while (0)
-
-#define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */
-
-/*
- * The group barrier in front of the rsm & ssm are necessary to ensure
- * that none of the previous instructions in the same group are
- * affected by the rsm/ssm.
- */
-/* For spinlocks etc */
-
-/*
- * - clearing psr.i is implicitly serialized (visible by next insn)
- * - setting psr.i requires data serialization
- * - we need a stop-bit before reading PSR because we sometimes
- * write a floating-point register right before reading the PSR
- * and that writes to PSR.mfl
- */
-#define __local_irq_save(x) \
-do { \
- ia64_stop(); \
- (x) = ia64_getreg(_IA64_REG_PSR); \
- ia64_stop(); \
- ia64_rsm(IA64_PSR_I); \
-} while (0)
-
-#define __local_irq_disable() \
-do { \
- ia64_stop(); \
- ia64_rsm(IA64_PSR_I); \
-} while (0)
-
-#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
-
-#ifdef CONFIG_IA64_DEBUG_IRQ
-
- extern unsigned long last_cli_ip;
-
-# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP)
-
-# define local_irq_save(x) \
-do { \
- unsigned long psr; \
- \
- __local_irq_save(psr); \
- if (psr & IA64_PSR_I) \
- __save_ip(); \
- (x) = psr; \
-} while (0)
-
-# define local_irq_disable() do { unsigned long x; local_irq_save(x); } while (0)
-
-# define local_irq_restore(x) \
-do { \
- unsigned long old_psr, psr = (x); \
- \
- local_save_flags(old_psr); \
- __local_irq_restore(psr); \
- if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) \
- __save_ip(); \
-} while (0)
-
-#else /* !CONFIG_IA64_DEBUG_IRQ */
-# define local_irq_save(x) __local_irq_save(x)
-# define local_irq_disable() __local_irq_disable()
-# define local_irq_restore(x) __local_irq_restore(x)
-#endif /* !CONFIG_IA64_DEBUG_IRQ */
-
-#define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
-#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
-
-#define irqs_disabled() \
-({ \
- unsigned long __ia64_id_flags; \
- local_save_flags(__ia64_id_flags); \
- (__ia64_id_flags & IA64_PSR_I) == 0; \
-})
-
-#ifdef XEN
-#define local_irq_is_enabled() (!irqs_disabled())
-extern struct vcpu *ia64_switch_to(struct vcpu *next_task);
-#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
-#else
-#ifdef __KERNEL__
-
-#ifdef CONFIG_IA32_SUPPORT
-# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
-#else
-# define IS_IA32_PROCESS(regs) 0
-struct task_struct;
-static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
-static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
-#endif
-
-/*
- * Context switch from one thread to another. If the two threads have
- * different address spaces, schedule() has already taken care of
- * switching to the new address space by calling switch_mm().
- *
- * Disabling access to the fph partition and the debug-register
- * context switch MUST be done before calling ia64_switch_to() since a
- * newly created thread returns directly to
- * ia64_ret_from_syscall_clear_r8.
- */
-extern struct task_struct *ia64_switch_to (void *next_task);
-
-struct task_struct;
-
-extern void ia64_save_extra (struct task_struct *task);
-extern void ia64_load_extra (struct task_struct *task);
-
-#ifdef CONFIG_PERFMON
- DECLARE_PER_CPU(unsigned long, pfm_syst_info);
-# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
-#else
-# define PERFMON_IS_SYSWIDE() (0)
-#endif
-
-#define IA64_HAS_EXTRA_STATE(t) \
- ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
- || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
-
-#define __switch_to(prev,next,last) do { \
- if (IA64_HAS_EXTRA_STATE(prev)) \
- ia64_save_extra(prev); \
- if (IA64_HAS_EXTRA_STATE(next)) \
- ia64_load_extra(next); \
- ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
- (last) = ia64_switch_to((next)); \
-} while (0)
-
-#ifdef CONFIG_SMP
-/*
- * In the SMP case, we save the fph state when context-switching away from a thread that
- * modified fph. This way, when the thread gets scheduled on another CPU, the CPU can
- * pick up the state from task->thread.fph, avoiding the complication of having to fetch
- * the latest fph state from another CPU. In other words: eager save, lazy restore.
- */
-# define switch_to(prev,next,last) do { \
- if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
- ia64_psr(ia64_task_regs(prev))->mfh = 0; \
- (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
- __ia64_save_fpu((prev)->thread.fph); \
- } \
- __switch_to(prev, next, last); \
-} while (0)
-#else
-# define switch_to(prev,next,last) __switch_to(prev, next, last)
-#endif
-
-/*
- * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
- * because that could cause a deadlock. Here is an example by Erich Focht:
- *
- * Example:
- * CPU#0:
- * schedule()
- * -> spin_lock_irq(&rq->lock)
- * -> context_switch()
- * -> wrap_mmu_context()
- * -> read_lock(&tasklist_lock)
- *
- * CPU#1:
- * sys_wait4() or release_task() or forget_original_parent()
- * -> write_lock(&tasklist_lock)
- * -> do_notify_parent()
- * -> wake_up_parent()
- * -> try_to_wake_up()
- * -> spin_lock_irq(&parent_rq->lock)
- *
- * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
- * of that CPU which will not be released, because there we wait for the
- * tasklist_lock to become available.
- */
-#define __ARCH_WANT_UNLOCKED_CTXSW
-
-#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
-
-void cpu_idle_wait(void);
-
-#define arch_align_stack(x) (x)
-
-#endif /* __KERNEL__ */
-#endif /* XEN */
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef XEN
-#include <asm/xensystem.h>
-#ifndef __ASSEMBLY__
-struct resource;
-#endif
-#endif
-
-#endif /* _ASM_IA64_SYSTEM_H */
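A sketch of the usual pairing of the PSR-based IRQ macros above (the wrapper is hypothetical); note that restore only re-enables interrupts if psr.i was set when the flags were saved:

    static void with_irqs_off(void (*fn)(void *), void *arg)
    {
            unsigned long flags;

            local_irq_save(flags);     /* stop; snapshot PSR; rsm psr.i */
            fn(arg);
            local_irq_restore(flags);  /* restores only the psr.i bit */
    }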
diff --git a/xen/include/asm-ia64/linux-xen/asm/types.h b/xen/include/asm-ia64/linux-xen/asm/types.h
deleted file mode 100644
index 3dcfa34ae7..0000000000
--- a/xen/include/asm-ia64/linux-xen/asm/types.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef _ASM_IA64_TYPES_H
-#define _ASM_IA64_TYPES_H
-
-/*
- * This file is never included by application software unless explicitly requested (e.g.,
- * via linux/types.h) in which case the application is Linux specific so (user-) name
- * space pollution is not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid name clashes.
- *
- * Based on <asm-alpha/types.h>.
- *
- * Modified 1998-2000, 2002
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-
-#ifdef __ASSEMBLY__
-# define __IA64_UL(x) (x)
-# define __IA64_UL_CONST(x) x
-
-# ifdef __KERNEL__
-# define BITS_PER_LONG 64
-# endif
-
-#else
-# define __IA64_UL(x) ((unsigned long)(x))
-# define __IA64_UL_CONST(x) x##UL
-
-typedef unsigned int umode_t;
-
-/*
- * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
- * header files exported to user space
- */
-
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
-
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-typedef __signed__ int __s32;
-typedef unsigned int __u32;
-
-typedef __signed__ long __s64;
-typedef unsigned long __u64;
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-# ifdef __KERNEL__
-
-typedef __s8 s8;
-typedef __u8 u8;
-
-typedef __s16 s16;
-typedef __u16 u16;
-
-typedef __s32 s32;
-typedef __u32 u32;
-
-typedef __s64 s64;
-typedef __u64 u64;
-
-#define BITS_PER_LONG 64
-
-/* DMA addresses are 64-bits wide, in general. */
-
-typedef u64 dma_addr_t;
-
-typedef unsigned short kmem_bufctl_t;
-
-# endif /* __KERNEL__ */
-#endif /* !__ASSEMBLY__ */
-
-#ifdef XEN
-#include <asm/xentypes.h>
-
-#ifndef __ASSEMBLY__
-typedef unsigned int gfp_t;
-typedef u64 resource_size_t;
-typedef u32 dev_t;
-typedef unsigned int mode_t;
-#define THIS_MODULE NULL
-#endif
-#endif
-
-#endif /* _ASM_IA64_TYPES_H */
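__IA64_UL_CONST() above exists so one constant can be shared between C, where the UL suffix is required, and assembly, where it is rejected; a hypothetical example:

    /* Usable from both .c and .S files; the name is illustrative only. */
    #define EXAMPLE_REGION_BASE __IA64_UL_CONST(0xe000000000000000)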
diff --git a/xen/include/asm-ia64/linux-xen/linux/README.origin b/xen/include/asm-ia64/linux-xen/linux/README.origin
deleted file mode 100644
index fa59162b6e..0000000000
--- a/xen/include/asm-ia64/linux-xen/linux/README.origin
+++ /dev/null
@@ -1,23 +0,0 @@
-# Source files in this directory are near-identical copies of linux-2.6.13
-# files:
-
-# NOTE: ALL changes to these files should be clearly marked
-# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
-# easily updated to future versions of the corresponding Linux files.
-
-gfp.h -> linux/include/linux/gfp.h
-hardirq.h -> linux/include/linux/hardirq.h
-interrupt.h -> linux/include/linux/interrupt.h
-
-# The files below are from Linux-2.6.16.33
-oprofile.h -> linux/include/linux/oprofile.h
-
-# The files below are from Linux-2.6.19
-pci.h -> linux/include/linux/pci.h
-kobject.h -> linux/include/linux/kobject.h
-device.h -> linux/include/linux/device.h
-completion.h -> linux/include/linux/completion.h
-
-# The files below are from Linux-2.6.21
-cpu.h -> linux/include/linux/cpu.h
-efi.h -> linux/include/linux/efi.h
diff --git a/xen/include/asm-ia64/linux-xen/linux/completion.h b/xen/include/asm-ia64/linux-xen/linux/completion.h
deleted file mode 100644
index 2054930088..0000000000
--- a/xen/include/asm-ia64/linux-xen/linux/completion.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef __LINUX_COMPLETION_H
-#define __LINUX_COMPLETION_H
-
-/*
- * (C) Copyright 2001 Linus Torvalds
- *
- * Atomic wait-for-completion handler data structures.
- * See kernel/sched.c for details.
- */
-
-#include <linux/wait.h>
-
-struct completion {
- unsigned int done;
-#ifndef XEN
- wait_queue_head_t wait;
-#endif
-};
-
-#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-
-#define COMPLETION_INITIALIZER_ONSTACK(work) \
- ({ init_completion(&work); work; })
-
-#define DECLARE_COMPLETION(work) \
- struct completion work = COMPLETION_INITIALIZER(work)
-
-/*
- * Lockdep needs to run a non-constant initializer for on-stack
- * completions - so we use the _ONSTACK() variant for those that
- * are on the kernel stack:
- */
-#ifdef CONFIG_LOCKDEP
-# define DECLARE_COMPLETION_ONSTACK(work) \
- struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
-#else
-# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
-#endif
-
-static inline void init_completion(struct completion *x)
-{
- x->done = 0;
-#ifndef XEN
- init_waitqueue_head(&x->wait);
-#endif
-}
-
-extern void FASTCALL(wait_for_completion(struct completion *));
-extern int FASTCALL(wait_for_completion_interruptible(struct completion *x));
-extern unsigned long FASTCALL(wait_for_completion_timeout(struct completion *x,
- unsigned long timeout));
-extern unsigned long FASTCALL(wait_for_completion_interruptible_timeout(
- struct completion *x, unsigned long timeout));
-
-extern void FASTCALL(complete(struct completion *));
-extern void FASTCALL(complete_all(struct completion *));
-
-#define INIT_COMPLETION(x) ((x).done = 0)
-
-#endif
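A sketch of the conventional producer/consumer use of the completion above. Note that under XEN the wait-queue member is compiled out, so the static COMPLETION_INITIALIZER (which names (work).wait) would not build there; runtime init_completion() is used instead:

    static struct completion work_done;

    static void producer_finished(void)
    {
            complete(&work_done);  /* bump ->done (and, outside XEN, wake the waiter) */
    }

    static void consumer(void)
    {
            init_completion(&work_done);
            /* ...kick off the producer, then block until it signals: */
            wait_for_completion(&work_done);
    }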
diff --git a/xen/include/asm-ia64/linux-xen/linux/device.h b/xen/include/asm-ia64/linux-xen/linux/device.h
deleted file mode 100644
index 8ebe998659..0000000000
--- a/xen/include/asm-ia64/linux-xen/linux/device.h
+++ /dev/null
@@ -1,489 +0,0 @@
-/*
- * device.h - generic, centralized driver model
- *
- * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
- *
- * This file is released under the GPLv2
- *
- * See Documentation/driver-model/ for more information.
- */
-
-#ifndef _DEVICE_H_
-#define _DEVICE_H_
-
-#include <linux/ioport.h>
-#include <linux/kobject.h>
-#include <linux/klist.h>
-#include <linux/list.h>
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pm.h>
-#include <asm/semaphore.h>
-#include <asm/atomic.h>
-
-#define DEVICE_NAME_SIZE 50
-#define DEVICE_NAME_HALF __stringify(20) /* Less than half to accommodate slop */
-#define DEVICE_ID_SIZE 32
-#define BUS_ID_SIZE KOBJ_NAME_LEN
-
-
-struct device;
-struct device_driver;
-struct class;
-struct class_device;
-
-struct bus_type {
- const char * name;
-
- struct subsystem subsys;
- struct kset drivers;
- struct kset devices;
- struct klist klist_devices;
- struct klist klist_drivers;
-
- struct bus_attribute * bus_attrs;
- struct device_attribute * dev_attrs;
- struct driver_attribute * drv_attrs;
-
- int (*match)(struct device * dev, struct device_driver * drv);
- int (*uevent)(struct device *dev, char **envp,
- int num_envp, char *buffer, int buffer_size);
- int (*probe)(struct device * dev);
- int (*remove)(struct device * dev);
- void (*shutdown)(struct device * dev);
-
- int (*suspend)(struct device * dev, pm_message_t state);
- int (*suspend_late)(struct device * dev, pm_message_t state);
- int (*resume_early)(struct device * dev);
- int (*resume)(struct device * dev);
-};
-
-extern int __must_check bus_register(struct bus_type * bus);
-extern void bus_unregister(struct bus_type * bus);
-
-extern int __must_check bus_rescan_devices(struct bus_type * bus);
-
-/* iterator helpers for buses */
-
-int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data,
- int (*fn)(struct device *, void *));
-struct device * bus_find_device(struct bus_type *bus, struct device *start,
- void *data, int (*match)(struct device *, void *));
-
-int __must_check bus_for_each_drv(struct bus_type *bus,
- struct device_driver *start, void *data,
- int (*fn)(struct device_driver *, void *));
-
-/* driverfs interface for exporting bus attributes */
-
-struct bus_attribute {
-#ifndef XEN
- struct attribute attr;
-#endif
- ssize_t (*show)(struct bus_type *, char * buf);
- ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
-};
-
-#define BUS_ATTR(_name,_mode,_show,_store) \
-struct bus_attribute bus_attr_##_name = __ATTR(_name,_mode,_show,_store)
-
-extern int __must_check bus_create_file(struct bus_type *,
- struct bus_attribute *);
-extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
-
-struct device_driver {
- const char * name;
- struct bus_type * bus;
-
- struct completion unloaded;
- struct kobject kobj;
- struct klist klist_devices;
- struct klist_node knode_bus;
-
- struct module * owner;
-
- int (*probe) (struct device * dev);
- int (*remove) (struct device * dev);
- void (*shutdown) (struct device * dev);
- int (*suspend) (struct device * dev, pm_message_t state);
- int (*resume) (struct device * dev);
-
- unsigned int multithread_probe:1;
-};
-
-
-extern int __must_check driver_register(struct device_driver * drv);
-extern void driver_unregister(struct device_driver * drv);
-
-extern struct device_driver * get_driver(struct device_driver * drv);
-extern void put_driver(struct device_driver * drv);
-extern struct device_driver *driver_find(const char *name, struct bus_type *bus);
-extern int driver_probe_done(void);
-
-/* driverfs interface for exporting driver attributes */
-
-struct driver_attribute {
-#ifndef XEN
- struct attribute attr;
-#endif
- ssize_t (*show)(struct device_driver *, char * buf);
- ssize_t (*store)(struct device_driver *, const char * buf, size_t count);
-};
-
-#define DRIVER_ATTR(_name,_mode,_show,_store) \
-struct driver_attribute driver_attr_##_name = __ATTR(_name,_mode,_show,_store)
-
-extern int __must_check driver_create_file(struct device_driver *,
- struct driver_attribute *);
-extern void driver_remove_file(struct device_driver *, struct driver_attribute *);
-
-extern int __must_check driver_for_each_device(struct device_driver * drv,
- struct device *start, void *data,
- int (*fn)(struct device *, void *));
-struct device * driver_find_device(struct device_driver *drv,
- struct device *start, void *data,
- int (*match)(struct device *, void *));
-
-/*
- * device classes
- */
-struct class {
- const char * name;
- struct module * owner;
-
- struct subsystem subsys;
- struct list_head children;
- struct list_head devices;
- struct list_head interfaces;
-#ifdef XEN
- spinlock_t sem;
-#else
- struct semaphore sem; /* locks both the children and interfaces lists */
-#endif
-
- struct kobject *virtual_dir;
-
- struct class_attribute * class_attrs;
- struct class_device_attribute * class_dev_attrs;
- struct device_attribute * dev_attrs;
-
- int (*uevent)(struct class_device *dev, char **envp,
- int num_envp, char *buffer, int buffer_size);
- int (*dev_uevent)(struct device *dev, char **envp, int num_envp,
- char *buffer, int buffer_size);
-
- void (*release)(struct class_device *dev);
- void (*class_release)(struct class *class);
- void (*dev_release)(struct device *dev);
-
- int (*suspend)(struct device *, pm_message_t state);
- int (*resume)(struct device *);
-};
-
-extern int __must_check class_register(struct class *);
-extern void class_unregister(struct class *);
-
-
-struct class_attribute {
-#ifndef XEN
- struct attribute attr;
-#endif
- ssize_t (*show)(struct class *, char * buf);
- ssize_t (*store)(struct class *, const char * buf, size_t count);
-};
-
-#define CLASS_ATTR(_name,_mode,_show,_store) \
-struct class_attribute class_attr_##_name = __ATTR(_name,_mode,_show,_store)
-
-extern int __must_check class_create_file(struct class *,
- const struct class_attribute *);
-extern void class_remove_file(struct class *, const struct class_attribute *);
-
-struct class_device_attribute {
-#ifndef XEN
- struct attribute attr;
-#endif
- ssize_t (*show)(struct class_device *, char * buf);
- ssize_t (*store)(struct class_device *, const char * buf, size_t count);
-};
-
-#define CLASS_DEVICE_ATTR(_name,_mode,_show,_store) \
-struct class_device_attribute class_device_attr_##_name = \
- __ATTR(_name,_mode,_show,_store)
-
-extern int __must_check class_device_create_file(struct class_device *,
- const struct class_device_attribute *);
-
-/**
- * struct class_device - class devices
- * @class: pointer to the parent class for this class device. This is required.
- * @devt: for internal use by the driver core only.
- * @node: for internal use by the driver core only.
- * @kobj: for internal use by the driver core only.
- * @devt_attr: for internal use by the driver core only.
- * @groups: optional additional groups to be created
- * @dev: if set, a symlink to the struct device is created in the sysfs
- * directory for this struct class device.
- * @class_data: pointer to whatever you want to store here for this struct
- * class_device. Use class_get_devdata() and class_set_devdata() to get and
- * set this pointer.
- * @parent: pointer to a struct class_device that is the parent of this struct
- * class_device. If NULL, this class_device will show up at the root of the
- * struct class in sysfs (which is probably what you want to have happen.)
- * @release: pointer to a release function for this struct class_device. If
- * set, this will be called instead of the class specific release function.
- * Only use this if you want to override the default release function, like
- * when you are nesting class_device structures.
- * @uevent: pointer to a uevent function for this struct class_device. If
- * set, this will be called instead of the class specific uevent function.
- * Only use this if you want to override the default uevent function, like
- * when you are nesting class_device structures.
- */
-struct class_device {
- struct list_head node;
-
- struct kobject kobj;
- struct class * class; /* required */
- dev_t devt; /* dev_t, creates the sysfs "dev" */
- struct class_device_attribute *devt_attr;
- struct class_device_attribute uevent_attr;
- struct device * dev; /* not necessary, but nice to have */
- void * class_data; /* class-specific data */
- struct class_device *parent; /* parent of this child device, if there is one */
- struct attribute_group ** groups; /* optional groups */
-
- void (*release)(struct class_device *dev);
- int (*uevent)(struct class_device *dev, char **envp,
- int num_envp, char *buffer, int buffer_size);
- char class_id[BUS_ID_SIZE]; /* unique to this class */
-};
-
-static inline void *
-class_get_devdata (struct class_device *dev)
-{
- return dev->class_data;
-}
-
-static inline void
-class_set_devdata (struct class_device *dev, void *data)
-{
- dev->class_data = data;
-}
-
-
-extern int __must_check class_device_register(struct class_device *);
-extern void class_device_unregister(struct class_device *);
-extern void class_device_initialize(struct class_device *);
-extern int __must_check class_device_add(struct class_device *);
-extern void class_device_del(struct class_device *);
-
-extern int class_device_rename(struct class_device *, char *);
-
-extern struct class_device * class_device_get(struct class_device *);
-extern void class_device_put(struct class_device *);
-
-extern void class_device_remove_file(struct class_device *,
- const struct class_device_attribute *);
-extern int __must_check class_device_create_bin_file(struct class_device *,
- struct bin_attribute *);
-extern void class_device_remove_bin_file(struct class_device *,
- struct bin_attribute *);
-
-struct class_interface {
- struct list_head node;
- struct class *class;
-
- int (*add) (struct class_device *, struct class_interface *);
- void (*remove) (struct class_device *, struct class_interface *);
- int (*add_dev) (struct device *, struct class_interface *);
- void (*remove_dev) (struct device *, struct class_interface *);
-};
-
-extern int __must_check class_interface_register(struct class_interface *);
-extern void class_interface_unregister(struct class_interface *);
-
-extern struct class *class_create(struct module *owner, const char *name);
-extern void class_destroy(struct class *cls);
-extern struct class_device *class_device_create(struct class *cls,
- struct class_device *parent,
- dev_t devt,
- struct device *device,
- const char *fmt, ...)
- __attribute__((format(printf,5,6)));
-extern void class_device_destroy(struct class *cls, dev_t devt);
-
-/* interface for exporting device attributes */
-struct device_attribute {
- struct attribute attr;
- ssize_t (*show)(struct device *dev, struct device_attribute *attr,
- char *buf);
- ssize_t (*store)(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count);
-};
-
-#define DEVICE_ATTR(_name,_mode,_show,_store) \
-struct device_attribute dev_attr_##_name = __ATTR(_name,_mode,_show,_store)
-
-extern int __must_check device_create_file(struct device *device,
- struct device_attribute * entry);
-extern void device_remove_file(struct device * dev, struct device_attribute * attr);
-extern int __must_check device_create_bin_file(struct device *dev,
- struct bin_attribute *attr);
-extern void device_remove_bin_file(struct device *dev,
- struct bin_attribute *attr);
-struct device {
- struct klist klist_children;
- struct klist_node knode_parent; /* node in sibling list */
- struct klist_node knode_driver;
- struct klist_node knode_bus;
- struct device * parent;
-
- struct kobject kobj;
- char bus_id[BUS_ID_SIZE]; /* position on parent bus */
- unsigned is_registered:1;
- struct device_attribute uevent_attr;
- struct device_attribute *devt_attr;
-
-#ifdef XEN
- spinlock_t sem;
-#else
- struct semaphore sem; /* semaphore to synchronize calls to
- * its driver.
- */
-#endif
-
- struct bus_type * bus; /* type of bus device is on */
- struct device_driver *driver; /* which driver has allocated this
- device */
- void *driver_data; /* data private to the driver */
- void *platform_data; /* Platform specific data, device
- core doesn't touch it */
- void *firmware_data; /* Firmware specific data (e.g. ACPI,
- BIOS data),reserved for device core*/
- struct dev_pm_info power;
-
- u64 *dma_mask; /* dma mask (if dma'able device) */
- u64 coherent_dma_mask;/* Like dma_mask, but for
- alloc_coherent mappings as
- not all hardware supports
- 64-bit addresses for consistent
- allocations of such descriptors. */
-
- struct list_head dma_pools; /* dma pools (if dma'ble) */
-
- struct dma_coherent_mem *dma_mem; /* internal for coherent mem
- override */
-
- /* class_device migration path */
- struct list_head node;
- struct class *class; /* optional*/
- dev_t devt; /* dev_t, creates the sysfs "dev" */
- struct attribute_group **groups; /* optional groups */
-
- void (*release)(struct device * dev);
-};
-
-static inline void *
-dev_get_drvdata (struct device *dev)
-{
- return dev->driver_data;
-}
-
-static inline void
-dev_set_drvdata (struct device *dev, void *data)
-{
- dev->driver_data = data;
-}
-
-static inline int device_is_registered(struct device *dev)
-{
- return dev->is_registered;
-}
-
-/*
- * High level routines for use by the bus drivers
- */
-extern int __must_check device_register(struct device * dev);
-extern void device_unregister(struct device * dev);
-extern void device_initialize(struct device * dev);
-extern int __must_check device_add(struct device * dev);
-extern void device_del(struct device * dev);
-extern int device_for_each_child(struct device *, void *,
- int (*fn)(struct device *, void *));
-extern int device_rename(struct device *dev, char *new_name);
-
-/*
- * Manual binding of a device to driver. See drivers/base/bus.c
- * for information on use.
- */
-extern int __must_check device_bind_driver(struct device *dev);
-extern void device_release_driver(struct device * dev);
-extern int __must_check device_attach(struct device * dev);
-extern int __must_check driver_attach(struct device_driver *drv);
-extern int __must_check device_reprobe(struct device *dev);
-
-/*
- * Easy functions for dynamically creating devices on the fly
- */
-extern struct device *device_create(struct class *cls, struct device *parent,
- dev_t devt, const char *fmt, ...)
- __attribute__((format(printf,4,5)));
-extern void device_destroy(struct class *cls, dev_t devt);
-
-extern int virtual_device_parent(struct device *dev);
-
-/*
- * Platform "fixup" functions - allow the platform to have their say
- * about devices and actions that the general device layer doesn't
- * know about.
- */
-/* Notify platform of device discovery */
-extern int (*platform_notify)(struct device * dev);
-
-extern int (*platform_notify_remove)(struct device * dev);
-
-
-/**
- * get_device - atomically increment the reference count for the device.
- *
- */
-extern struct device * get_device(struct device * dev);
-extern void put_device(struct device * dev);
-
-
-/* drivers/base/power/shutdown.c */
-extern void device_shutdown(void);
-
-
-/* drivers/base/firmware.c */
-extern int __must_check firmware_register(struct subsystem *);
-extern void firmware_unregister(struct subsystem *);
-
-/* debugging and troubleshooting/diagnostic helpers. */
-extern const char *dev_driver_string(struct device *dev);
-#define dev_printk(level, dev, format, arg...) \
- printk(level "%s %s: " format , dev_driver_string(dev) , (dev)->bus_id , ## arg)
-
-#ifdef DEBUG
-#define dev_dbg(dev, format, arg...) \
- dev_printk(KERN_DEBUG , dev , format , ## arg)
-#else
-#define dev_dbg(dev, format, arg...) do { (void)(dev); } while (0)
-#endif
-
-#define dev_err(dev, format, arg...) \
- dev_printk(KERN_ERR , dev , format , ## arg)
-#define dev_info(dev, format, arg...) \
- dev_printk(KERN_INFO , dev , format , ## arg)
-#define dev_warn(dev, format, arg...) \
- dev_printk(KERN_WARNING , dev , format , ## arg)
-#define dev_notice(dev, format, arg...) \
- dev_printk(KERN_NOTICE , dev , format , ## arg)
-
-/* Create alias, so I can be autoloaded. */
-#define MODULE_ALIAS_CHARDEV(major,minor) \
- MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
-#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
- MODULE_ALIAS("char-major-" __stringify(major) "-*")
-#endif /* _DEVICE_H_ */
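For illustration, the refcounting discipline implied by get_device()/put_device() above, combined with the dev_printk helpers; the function is invented:

    static void inspect_device(struct device *dev)
    {
            struct device *ref = get_device(dev);

            if (ref != NULL) {
                    dev_info(ref, "drvdata at %p\n", dev_get_drvdata(ref));
                    put_device(ref);  /* drop the reference we took */
            }
    }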
diff --git a/xen/include/asm-ia64/linux-xen/linux/efi.h b/xen/include/asm-ia64/linux-xen/linux/efi.h
deleted file mode 100644
index 2229101a26..0000000000
--- a/xen/include/asm-ia64/linux-xen/linux/efi.h
+++ /dev/null
@@ -1,534 +0,0 @@
-#ifndef _LINUX_EFI_H
-#define _LINUX_EFI_H
-
-#ifndef __ASSEMBLY__
-
-/*
- * Extensible Firmware Interface
- * Based on 'Extensible Firmware Interface Specification' version 0.9, April 30, 1999
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/rtc.h>
-#include <linux/ioport.h>
-
-#include <asm/page.h>
-#include <asm/system.h>
-#ifdef XEN
-#include <asm/pgtable.h>
-#endif
-
-#define EFI_SUCCESS 0
-#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
-#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
-#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
-#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
-#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
-#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
-
-typedef unsigned long efi_status_t;
-typedef u8 efi_bool_t;
-typedef u16 efi_char16_t; /* UNICODE character */
-
-
-typedef struct {
- u8 b[16];
-} efi_guid_t;
-
-#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
-((efi_guid_t) \
-{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
- (b) & 0xff, ((b) >> 8) & 0xff, \
- (c) & 0xff, ((c) >> 8) & 0xff, \
- (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
-
-/*
- * Generic EFI table header
- */
-typedef struct {
- u64 signature;
- u32 revision;
- u32 headersize;
- u32 crc32;
- u32 reserved;
-} efi_table_hdr_t;
-
-/*
- * Memory map descriptor:
- */
-
-/* Memory types: */
-#define EFI_RESERVED_TYPE 0
-#define EFI_LOADER_CODE 1
-#define EFI_LOADER_DATA 2
-#define EFI_BOOT_SERVICES_CODE 3
-#define EFI_BOOT_SERVICES_DATA 4
-#define EFI_RUNTIME_SERVICES_CODE 5
-#define EFI_RUNTIME_SERVICES_DATA 6
-#define EFI_CONVENTIONAL_MEMORY 7
-#define EFI_UNUSABLE_MEMORY 8
-#define EFI_ACPI_RECLAIM_MEMORY 9
-#define EFI_ACPI_MEMORY_NVS 10
-#define EFI_MEMORY_MAPPED_IO 11
-#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
-#define EFI_PAL_CODE 13
-#define EFI_MAX_MEMORY_TYPE 14
-
-/* Attribute values: */
-#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */
-#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */
-#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */
-#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */
-#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
-#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
-#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
-#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
-#define EFI_MEMORY_DESCRIPTOR_VERSION 1
-
-#define EFI_PAGE_SHIFT 12
-
-typedef struct {
- u32 type;
- u32 pad;
- u64 phys_addr;
- u64 virt_addr;
- u64 num_pages;
- u64 attribute;
-} efi_memory_desc_t;
-
-typedef int (*efi_freemem_callback_t) (unsigned long start, unsigned long end, void *arg);
-
-/*
- * Types and defines for Time Services
- */
-#define EFI_TIME_ADJUST_DAYLIGHT 0x1
-#define EFI_TIME_IN_DAYLIGHT 0x2
-#define EFI_UNSPECIFIED_TIMEZONE 0x07ff
-
-typedef struct {
- u16 year;
- u8 month;
- u8 day;
- u8 hour;
- u8 minute;
- u8 second;
- u8 pad1;
- u32 nanosecond;
- s16 timezone;
- u8 daylight;
- u8 pad2;
-} efi_time_t;
-
-typedef struct {
- u32 resolution;
- u32 accuracy;
- u8 sets_to_zero;
-} efi_time_cap_t;
-
-/*
- * Types and defines for EFI ResetSystem
- */
-#define EFI_RESET_COLD 0
-#define EFI_RESET_WARM 1
-#define EFI_RESET_SHUTDOWN 2
-
-/*
- * EFI Runtime Services table
- */
-#define EFI_RUNTIME_SERVICES_SIGNATURE ((u64)0x5652453544e5552ULL)
-#define EFI_RUNTIME_SERVICES_REVISION 0x00010000
-
-typedef struct {
- efi_table_hdr_t hdr;
- unsigned long get_time;
- unsigned long set_time;
- unsigned long get_wakeup_time;
- unsigned long set_wakeup_time;
- unsigned long set_virtual_address_map;
- unsigned long convert_pointer;
- unsigned long get_variable;
- unsigned long get_next_variable;
- unsigned long set_variable;
- unsigned long get_next_high_mono_count;
- unsigned long reset_system;
-} efi_runtime_services_t;
-
-typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
-typedef efi_status_t efi_set_time_t (efi_time_t *tm);
-typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
- efi_time_t *tm);
-typedef efi_status_t efi_set_wakeup_time_t (efi_bool_t enabled, efi_time_t *tm);
-typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
- unsigned long *data_size, void *data);
-typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name,
- efi_guid_t *vendor);
-typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
- unsigned long attr, unsigned long data_size,
- void *data);
-typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
-typedef void efi_reset_system_t (int reset_type, efi_status_t status,
- unsigned long data_size, efi_char16_t *data);
-typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_size,
- unsigned long descriptor_size,
- u32 descriptor_version,
- efi_memory_desc_t *virtual_map);
-
-/*
- * EFI Configuration Table and GUID definitions
- */
-#define NULL_GUID \
- EFI_GUID( 0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 )
-
-#define MPS_TABLE_GUID \
- EFI_GUID( 0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
-
-#define ACPI_TABLE_GUID \
- EFI_GUID( 0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
-
-#define ACPI_20_TABLE_GUID \
- EFI_GUID( 0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81 )
-
-#define SMBIOS_TABLE_GUID \
- EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
-
-#define SAL_SYSTEM_TABLE_GUID \
- EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
-
-#define HCDP_TABLE_GUID \
- EFI_GUID( 0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98 )
-
-#define UGA_IO_PROTOCOL_GUID \
- EFI_GUID( 0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0xb, 0x7, 0xa2 )
-
-#define EFI_GLOBAL_VARIABLE_GUID \
- EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
-
-typedef struct {
- efi_guid_t guid;
- unsigned long table;
-} efi_config_table_t;
-
-#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
-
-typedef struct {
- efi_table_hdr_t hdr;
- unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
- u32 fw_revision;
- unsigned long con_in_handle;
- unsigned long con_in;
- unsigned long con_out_handle;
- unsigned long con_out;
- unsigned long stderr_handle;
- unsigned long stderr;
- efi_runtime_services_t *runtime;
- unsigned long boottime;
- unsigned long nr_tables;
- unsigned long tables;
-} efi_system_table_t;
-
-struct efi_memory_map {
-#ifndef XEN
- void *phys_map;
- void *map;
-#else
- efi_memory_desc_t *phys_map;
- efi_memory_desc_t *map;
-#endif
- void *map_end;
- int nr_map;
- unsigned long desc_version;
- unsigned long desc_size;
-};
-
-#define EFI_INVALID_TABLE_ADDR (~0UL)
-
-/*
- * All runtime access to EFI goes through this structure:
- */
-extern struct efi {
- efi_system_table_t *systab; /* EFI system table */
- unsigned long mps; /* MPS table */
- unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
- unsigned long acpi20; /* ACPI table (ACPI 2.0) */
- unsigned long smbios; /* SM BIOS table */
- unsigned long sal_systab; /* SAL system table */
- unsigned long boot_info; /* boot info table */
- unsigned long hcdp; /* HCDP table */
- unsigned long uga; /* UGA table */
- efi_get_time_t *get_time;
- efi_set_time_t *set_time;
- efi_get_wakeup_time_t *get_wakeup_time;
- efi_set_wakeup_time_t *set_wakeup_time;
- efi_get_variable_t *get_variable;
- efi_get_next_variable_t *get_next_variable;
- efi_set_variable_t *set_variable;
- efi_get_next_high_mono_count_t *get_next_high_mono_count;
- efi_reset_system_t *reset_system;
- efi_set_virtual_address_map_t *set_virtual_address_map;
-} efi;
-
-static inline int
-efi_guidcmp (efi_guid_t left, efi_guid_t right)
-{
- return memcmp(&left, &right, sizeof (efi_guid_t));
-}
-
-static inline char *
-efi_guid_unparse(efi_guid_t *guid, char *out)
-{
-#ifndef XEN
- sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
-#else
- snprintf(out, 37, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
-#endif
- guid->b[3], guid->b[2], guid->b[1], guid->b[0],
- guid->b[5], guid->b[4], guid->b[7], guid->b[6],
- guid->b[8], guid->b[9], guid->b[10], guid->b[11],
- guid->b[12], guid->b[13], guid->b[14], guid->b[15]);
- return out;
-}
-
-extern void efi_init (void);
-extern void *efi_get_pal_addr (void);
-extern void efi_map_pal_code (void);
-#ifdef XEN
-extern void efi_unmap_pal_code (void);
-#endif
-extern void efi_map_memmap(void);
-extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-extern void efi_gettimeofday (struct timespec *ts);
-extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
-extern u64 efi_get_iobase (void);
-extern u32 efi_mem_type (unsigned long phys_addr);
-extern u64 efi_mem_attributes (unsigned long phys_addr);
-extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
-extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size,
- u64 attr);
-extern int __init efi_uart_console_only (void);
-extern void efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource);
-extern unsigned long efi_get_time(void);
-extern int efi_set_rtc_mmss(unsigned long nowtime);
-extern int is_available_memory(efi_memory_desc_t * md);
-extern struct efi_memory_map memmap;
-
-/**
- * efi_range_is_wc - check the WC bit on an address range
- * @start: starting kvirt address
- * @len: length of range
- *
- * Consult the EFI memory map and make sure it's ok to set this range WC.
- * Returns true or false.
- */
-static inline int efi_range_is_wc(unsigned long start, unsigned long len)
-{
- unsigned long i;
-
- for (i = 0; i < len; i += (1UL << EFI_PAGE_SHIFT)) {
- unsigned long paddr = __pa(start + i);
- if (!(efi_mem_attributes(paddr) & EFI_MEMORY_WC))
- return 0;
- }
- /* The range checked out */
- return 1;
-}
-
-#ifdef CONFIG_EFI_PCDP
-extern int __init efi_setup_pcdp_console(char *);
-#endif
-
-/*
- * We play games with efi_enabled so that the compiler will, if possible, remove
- * EFI-related code altogether.
- */
-#ifdef CONFIG_EFI
-# ifdef CONFIG_X86
- extern int efi_enabled;
-# else
-# define efi_enabled 1
-# endif
-#else
-# define efi_enabled 0
-#endif
-
-/*
- * Variable Attributes
- */
-#define EFI_VARIABLE_NON_VOLATILE 0x0000000000000001
-#define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002
-#define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004
-
-/*
- * EFI Device Path information
- */
-#define EFI_DEV_HW 0x01
-#define EFI_DEV_PCI 1
-#define EFI_DEV_PCCARD 2
-#define EFI_DEV_MEM_MAPPED 3
-#define EFI_DEV_VENDOR 4
-#define EFI_DEV_CONTROLLER 5
-#define EFI_DEV_ACPI 0x02
-#define EFI_DEV_BASIC_ACPI 1
-#define EFI_DEV_EXPANDED_ACPI 2
-#define EFI_DEV_MSG 0x03
-#define EFI_DEV_MSG_ATAPI 1
-#define EFI_DEV_MSG_SCSI 2
-#define EFI_DEV_MSG_FC 3
-#define EFI_DEV_MSG_1394 4
-#define EFI_DEV_MSG_USB 5
-#define EFI_DEV_MSG_USB_CLASS 15
-#define EFI_DEV_MSG_I20 6
-#define EFI_DEV_MSG_MAC 11
-#define EFI_DEV_MSG_IPV4 12
-#define EFI_DEV_MSG_IPV6 13
-#define EFI_DEV_MSG_INFINIBAND 9
-#define EFI_DEV_MSG_UART 14
-#define EFI_DEV_MSG_VENDOR 10
-#define EFI_DEV_MEDIA 0x04
-#define EFI_DEV_MEDIA_HARD_DRIVE 1
-#define EFI_DEV_MEDIA_CDROM 2
-#define EFI_DEV_MEDIA_VENDOR 3
-#define EFI_DEV_MEDIA_FILE 4
-#define EFI_DEV_MEDIA_PROTOCOL 5
-#define EFI_DEV_BIOS_BOOT 0x05
-#define EFI_DEV_END_PATH 0x7F
-#define EFI_DEV_END_PATH2 0xFF
-#define EFI_DEV_END_INSTANCE 0x01
-#define EFI_DEV_END_ENTIRE 0xFF
-
-struct efi_generic_dev_path {
- u8 type;
- u8 sub_type;
- u16 length;
-} __attribute ((packed));
-
-#ifdef XEN
-/*
- * According to xen/arch/ia64/xen/regionreg.c the RID space is broken up
- * into large-blocks. Each block belongs to a domain, except 0th block,
- * which is broken up into small-blocks. The small-blocks are used for
- * metaphysical mappings, again one per domain, except for the 0th
- * small-block which is unused other than very early on in the
- * hypervisor boot.
- *
- * By default each large-block is 18 bits wide, which is also the minimum
- * allowed width for a block. Each small-block is by default 1/64 the width
- * of a large-block, which is the maximum division allowed. In other words
- * each small-block is at least 12 bits wide.
- *
- * The portion of the 0th small-block that is used early on during
- * the hypervisor boot relates to IA64_REGION_ID_KERNEL, which is
- * used to form an RID using the following scheme, which seems to
- * have been inherited from Linux:
- *
- * a: bits 0-2: Region Number (0-7)
- * b: 3-N: IA64_REGION_ID_KERNEL (0)
- * c: N-23: reserved (0)
- *
- * N is defined by the platform.
- *
- * For EFI we use the following RID:
- *
- * a: bits 0-2: Region Number (0-7)
- * e: bits 3-N: IA64_REGION_ID_KERNEL (1)
- * f: bits N-53: reserved (0)
- *
- * + Only regions 6 and 7 are used, as we only need two RIDs. It's not really
- * important what this number is, so long as it's between 0 and 7.
- *
- * The nice thing about this is that we are only using 4 bits of RID
- * space, so it shouldn't have any chance of running into an adjacent
- * small-block since small-blocks are at least 12 bits wide.
- *
- * It would actually be possible to just use an IA64_REGION_ID_KERNEL
- * based RID for EFI use. The important thing is that it is in the 0th
- * small-block, and thus not available to domains. But as we have
- * lots of space, it seems nice and clean to just use a separate
- * RID for EFI.
- *
- * This can be trivially changed by updating the definition of XEN_EFI_RR.
- *
- * For reference, the RID is used to produce the value inserted
- * in to a region register in the following way:
- *
- * A: bit 0: VHPT (0 = off, 1 = on)
- * B: bit 1: reserved (0)
- * C: bits 2-7: log 2 page_size
- * D: bits 8-N: RID
- * E: bits N-53: reserved (0)
- */
-
-/* rr7 (and rr6) may already be set to XEN_EFI_RR7 (and XEN_EFI_RR6), which
- * would indicate a nested EFI, SAL or PAL call, such
- * as from an MCA. This may have occurred during a call
- * to set_one_rr_efi(). To be safe, repin everything anyway.
- */
-
-#define XEN_EFI_RR_ENTER(rr6, rr7) do { \
- rr6 = ia64_get_rr(6UL << 61); \
- rr7 = ia64_get_rr(7UL << 61); \
- set_one_rr_efi(6UL << 61, XEN_EFI_RR6); \
- set_one_rr_efi(7UL << 61, XEN_EFI_RR7); \
- efi_map_pal_code(); \
-} while (0)
-
-/* There is no need to do anything if the saved rr7 (and rr6)
- * is XEN_EFI_RR, as that would just switch them from XEN_EFI_RR to
- * XEN_EFI_RR. Furthermore, if this is a nested call, it is important
- * not to unpin the PAL code (via efi_unmap_pal_code()) until the
- * outermost call has finished.
- */
-
-#define XEN_EFI_RR_LEAVE(rr6, rr7) do { \
- if (rr7 != XEN_EFI_RR7) { \
- efi_unmap_pal_code(); \
- set_one_rr_efi_restore(6UL << 61, rr6); \
- set_one_rr_efi_restore(7UL << 61, rr7); \
- } \
-} while (0)
-
-#else
-/* Just use rr6 and rr7 in a dummy fashion here to get
- * rid of compiler warnings - a better solution should
- * be found if this code is ever actually used */
-#define XEN_EFI_RR_ENTER(rr6, rr7) do { rr6 = 0; rr7 = 0; } while (0)
-#define XEN_EFI_RR_LEAVE(rr6, rr7) do {} while (0)
-#endif /* XEN */
-
-#define XEN_EFI_RR_DECLARE(rr6, rr7) unsigned long rr6, rr7;
-
-#endif /* !__ASSEMBLY__ */
-
-#ifdef XEN
-#include <asm/mmu_context.h> /* For IA64_REGION_ID_EFI and ia64_rid() */
-#include <asm/pgtable.h> /* IA64_GRANULE_SHIFT */
-
-/* macro version of vmMangleRID() */
-#define XEN_EFI_VM_MANGLE_RRVAL(rrval) \
- ((((rrval) & 0xff000000) >> 16) | \
- ((rrval) & 0x00ff0000) | \
- (((rrval) & 0x0000ff00) << 16 ) | \
- ((rrval) & 0x000000ff))
-
-#define XEN_EFI_REGION6 __IA64_UL_CONST(6)
-#define XEN_EFI_REGION7 __IA64_UL_CONST(7)
-#define _XEN_EFI_RR6 ((ia64_rid(XEN_IA64_REGION_ID_EFI, \
- XEN_EFI_REGION6) << 8) | \
- (IA64_GRANULE_SHIFT << 2))
-#define _XEN_EFI_RR7 ((ia64_rid(XEN_IA64_REGION_ID_EFI, \
- XEN_EFI_REGION7) << 8) | \
- (IA64_GRANULE_SHIFT << 2))
-#define XEN_EFI_RR6 XEN_EFI_VM_MANGLE_RRVAL(_XEN_EFI_RR6)
-#define XEN_EFI_RR7 XEN_EFI_VM_MANGLE_RRVAL(_XEN_EFI_RR7)
-
-#endif /* XEN */
-
-#endif /* _LINUX_EFI_H */
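A sketch of walking the EFI configuration tables through the global 'efi' struct and efi_guid_unparse() above; it assumes the table array is addressable at that value (e.g. after efi_init() has run), and the buffer size covers the 37-byte bound used by the XEN snprintf variant:

    static void dump_config_tables(void)
    {
            efi_config_table_t *tbl = (efi_config_table_t *)efi.systab->tables;
            unsigned long i;
            char buf[40];  /* efi_guid_unparse() writes up to 37 bytes */

            for (i = 0; i < efi.systab->nr_tables; i++)
                    printk("EFI table %lu: %s at 0x%lx\n", i,
                           efi_guid_unparse(&tbl[i].guid, buf), tbl[i].table);
    }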
diff --git a/xen/include/asm-ia64/linux-xen/linux/gfp.h b/xen/include/asm-ia64/linux-xen/linux/gfp.h
deleted file mode 100644
index 1733180046..0000000000
--- a/xen/include/asm-ia64/linux-xen/linux/gfp.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef __LINUX_GFP_H
-#define __LINUX_GFP_H
-
-#ifdef XEN
-#include <asm/bitops.h>
-#include <linux/topology.h>
-#endif
-#include <linux/mmzone.h>
-#include <linux/stddef.h>
-#include <linux/linkage.h>
-#include <linux/config.h>
-
-struct vm_area_struct;
-
-/*
- * GFP bitmasks..
- */
-/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
-#define __GFP_DMA 0x01u
-#define __GFP_HIGHMEM 0x02u
-
-/*
- * Action modifiers - doesn't change the zoning
- *
- * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
- * _might_ fail. This depends upon the particular VM implementation.
- *
- * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures.
- *
- * __GFP_NORETRY: The VM implementation must not retry indefinitely.
- */
-#define __GFP_WAIT 0x10u /* Can wait and reschedule? */
-#define __GFP_HIGH 0x20u /* Should access emergency pools? */
-#define __GFP_IO 0x40u /* Can start physical IO? */
-#define __GFP_FS 0x80u /* Can call down to low-level FS? */
-#define __GFP_COLD 0x100u /* Cache-cold page required */
-#define __GFP_NOWARN 0x200u /* Suppress page allocation failure warning */
-#define __GFP_REPEAT 0x400u /* Retry the allocation. Might fail */
-#define __GFP_NOFAIL 0x800u /* Retry for ever. Cannot fail */
-#define __GFP_NORETRY 0x1000u /* Do not retry. Might fail */
-#define __GFP_NO_GROW 0x2000u /* Slab internal usage */
-#define __GFP_COMP 0x4000u /* Add compound page metadata */
-#define __GFP_ZERO 0x8000u /* Return zeroed page on success */
-#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
-#define __GFP_NORECLAIM 0x20000u /* No zone reclaim during allocation */
-
-#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
-#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
-
-/* if you forget to add the bitmask here kernel will crash, period */
-#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
- __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
- __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
- __GFP_NOMEMALLOC|__GFP_NORECLAIM)
-
-#define GFP_ATOMIC (__GFP_HIGH)
-#define GFP_NOIO (__GFP_WAIT)
-#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
-
-/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
- platforms, used as appropriate on others */
-
-#define GFP_DMA __GFP_DMA
-
-
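To make the flag composition above concrete, here is a minimal standalone sketch; the masks are re-stated locally for illustration (in the header itself GFP_ZONEMASK comes from linux/mmzone.h):

#include <stdio.h>

#define __GFP_WAIT   0x10u
#define __GFP_IO     0x40u
#define __GFP_FS     0x80u
#define __GFP_ZERO   0x8000u
#define GFP_KERNEL   (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_ZONEMASK 0x03u	/* low two bits select the zone */

int main(void)
{
	unsigned int mask = GFP_KERNEL | __GFP_ZERO;

	printf("zone bits %#x, may sleep %d, zeroed page %d\n",
	       mask & GFP_ZONEMASK,
	       !!(mask & __GFP_WAIT), !!(mask & __GFP_ZERO));
	return 0;
}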
-/*
- * There is only one page-allocator function, but two main namespaces for
- * it.  The alloc_page*() variants return 'struct page *' and as such can
- * allocate highmem pages; the *get*page*() variants return virtual kernel
- * addresses to the allocated page(s).
- */
-
-/*
- * We get the zone list from the current node and the gfp_mask.
- * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
- *
- * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
- * optimized to &contig_page_data at compile-time.
- */
-
-#ifndef XEN
-#ifndef HAVE_ARCH_FREE_PAGE
-static inline void arch_free_page(struct page *page, int order) { }
-#endif
-
-extern struct page *
-FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
-
-static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask,
- unsigned int order)
-{
- if (unlikely(order >= MAX_ORDER))
- return NULL;
-
- return __alloc_pages(gfp_mask, order,
- NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
-}
-
-#ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order);
-
-static inline struct page *
-alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
-{
- if (unlikely(order >= MAX_ORDER))
- return NULL;
-
- return alloc_pages_current(gfp_mask, order);
-}
-extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
- struct vm_area_struct *vma, unsigned long addr);
-#else
-#define alloc_pages(gfp_mask, order) \
- alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
-#endif
-#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-
-extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order));
-extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
-
-#define __get_free_page(gfp_mask) \
- __get_free_pages((gfp_mask),0)
-
-#define __get_dma_pages(gfp_mask, order) \
- __get_free_pages((gfp_mask) | GFP_DMA,(order))
-
-extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
-extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
-extern void FASTCALL(free_hot_page(struct page *page));
-extern void FASTCALL(free_cold_page(struct page *page));
-
-#define __free_page(page) __free_pages((page), 0)
-#define free_page(addr) free_pages((addr),0)
-#endif /* XEN */
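A hedged usage sketch of the two namespaces described above, valid only in the non-Xen Linux build this header was copied from:

static void namespaces_demo(void)
{
	struct page *pg  = alloc_page(GFP_KERNEL);	/* returns struct page * */
	unsigned long va = __get_free_page(GFP_KERNEL);	/* returns a kernel address */

	if (pg)
		__free_page(pg);
	if (va)
		free_page(va);
}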
-
-void page_alloc_init(void);
-#ifdef CONFIG_NUMA
-void drain_remote_pages(void);
-#else
-static inline void drain_remote_pages(void) { };
-#endif
-
-#endif /* __LINUX_GFP_H */
diff --git a/xen/include/asm-ia64/linux-xen/linux/hardirq.h b/xen/include/asm-ia64/linux-xen/linux/hardirq.h
deleted file mode 100644
index 2c691dd391..0000000000
--- a/xen/include/asm-ia64/linux-xen/linux/hardirq.h
+++ /dev/null
@@ -1,116 +0,0 @@
-#ifndef LINUX_HARDIRQ_H
-#define LINUX_HARDIRQ_H
-
-#include <linux/config.h>
-#include <linux/preempt.h>
-#include <linux/smp_lock.h>
-#include <asm/hardirq.h>
-#include <asm/system.h>
-
-/*
- * We put the hardirq and softirq counter into the preemption
- * counter. The bitmask has the following meaning:
- *
- * - bits 0-7 are the preemption count (max preemption depth: 256)
- * - bits 8-15 are the softirq count (max # of softirqs: 256)
- *
- * The hardirq count can be overridden per architecture, the default is:
- *
- * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
- * - ( bit 28 is the PREEMPT_ACTIVE flag. )
- *
- * PREEMPT_MASK: 0x000000ff
- * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x0fff0000
- */
-#define PREEMPT_BITS 8
-#define SOFTIRQ_BITS 8
-
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS 12
-/*
- * The hardirq mask has to be large enough to have space for potentially
- * all IRQ sources in the system nesting on a single CPU.
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-#endif
-
-#define PREEMPT_SHIFT 0
-#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-
-#define __IRQ_MASK(x) ((1UL << (x))-1)
-
-#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
-#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
-#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-
-#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
-#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
-#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
-
-#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
-#ifndef XEN
-#error PREEMPT_ACTIVE is too low!
-#endif
-#endif
-
-#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
-
-/*
- * Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- */
-#define in_irq() (hardirq_count())
-#define in_softirq() (softirq_count())
-#define in_interrupt() (irq_count())
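The bit layout documented above can be checked in isolation. A standalone sketch re-stating the shifts (mirroring, not replacing, the definitions above):

#include <assert.h>

#define PREEMPT_BITS  8
#define SOFTIRQ_BITS  8
#define HARDIRQ_BITS  12
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define __IRQ_MASK(x) ((1UL << (x)) - 1)

int main(void)
{
	assert((__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) == 0x000000ffUL);
	assert((__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) == 0x0000ff00UL);
	assert((__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) == 0x0fff0000UL);

	/* two nested hardirqs: the hardirq field holds 2, so in_irq() is true */
	unsigned long preempt_count = 2UL << HARDIRQ_SHIFT;
	assert(preempt_count & (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT));
	return 0;
}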
-
-#ifndef XEN
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-#endif
-#endif
-
-#ifdef CONFIG_PREEMPT
-# define preemptible() (preempt_count() == 0 && !irqs_disabled())
-# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
-#else
-# define preemptible() 0
-# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
-#endif
-
-#ifdef CONFIG_SMP
-extern void synchronize_irq(unsigned int irq);
-#else
-# define synchronize_irq(irq) barrier()
-#endif
-
-#define nmi_enter() irq_enter()
-#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET)
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-static inline void account_user_vtime(struct task_struct *tsk)
-{
-}
-
-static inline void account_system_vtime(struct task_struct *tsk)
-{
-}
-#endif
-
-#define irq_enter() \
- do { \
- account_system_vtime(current); \
- /*add_preempt_count(HARDIRQ_OFFSET);*/ \
- preempt_count() += HARDIRQ_OFFSET; \
- } while (0)
-
-extern void irq_exit(void);
-
-#endif /* LINUX_HARDIRQ_H */
diff --git a/xen/include/asm-ia64/linux-xen/linux/interrupt.h b/xen/include/asm-ia64/linux-xen/linux/interrupt.h
deleted file mode 100644
index 2990ace0a5..0000000000
--- a/xen/include/asm-ia64/linux-xen/linux/interrupt.h
+++ /dev/null
@@ -1,307 +0,0 @@
-/* interrupt.h */
-#ifndef _LINUX_INTERRUPT_H
-#define _LINUX_INTERRUPT_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/linkage.h>
-#include <linux/bitops.h>
-#include <linux/preempt.h>
-#include <linux/cpumask.h>
-#include <linux/hardirq.h>
-#include <asm/atomic.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-
-/*
- * For 2.4.x compatibility, 2.4.x code can use
- *
- * typedef void irqreturn_t;
- * #define IRQ_NONE
- * #define IRQ_HANDLED
- * #define IRQ_RETVAL(x)
- *
- * To mix old-style and new-style irq handler returns.
- *
- * IRQ_NONE means we didn't handle it.
- * IRQ_HANDLED means that we did have a valid interrupt and handled it.
- * IRQ_RETVAL(x) selects one of the two, depending on whether x is non-zero (non-zero means handled)
- */
-#ifdef XEN
-typedef void irqreturn_t;
-#define IRQ_NONE
-#define IRQ_HANDLED
-#define IRQ_RETVAL(x)
-#else
-typedef int irqreturn_t;
-#define IRQ_NONE (0)
-#define IRQ_HANDLED (1)
-#define IRQ_RETVAL(x) ((x) != 0)
-#endif
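A standalone sketch of the non-Xen return convention; the handler and its "did my device raise this" flag are hypothetical:

#include <stdio.h>

typedef int irqreturn_t;
#define IRQ_NONE      (0)
#define IRQ_HANDLED   (1)
#define IRQ_RETVAL(x) ((x) != 0)

/* A driver returns IRQ_HANDLED only when its own device was the source. */
static irqreturn_t fake_handler(int device_raised_irq)
{
	return IRQ_RETVAL(device_raised_irq);
}

int main(void)
{
	printf("%d %d\n", fake_handler(0) == IRQ_NONE,
	       fake_handler(1) == IRQ_HANDLED);
	return 0;
}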
-
-#ifndef XEN
-struct irqaction {
- irqreturn_t (*handler)(int, void *, struct pt_regs *);
- unsigned long flags;
- cpumask_t mask;
- const char *name;
- void *dev_id;
- struct irqaction *next;
- int irq;
- struct proc_dir_entry *dir;
-};
-
-extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
-extern int request_irq_vector(unsigned int,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long, const char *, void *);
-extern void release_irq_vector(unsigned int, void *);
-#endif
-
-
-#ifdef CONFIG_GENERIC_HARDIRQS
-extern void disable_irq_nosync(unsigned int irq);
-extern void disable_irq(unsigned int irq);
-extern void enable_irq(unsigned int irq);
-#endif
-
-/*
- * Temporary defines for UP kernels, until all code gets fixed.
- */
-#ifndef CONFIG_SMP
-static inline void __deprecated cli(void)
-{
- local_irq_disable();
-}
-static inline void __deprecated sti(void)
-{
- local_irq_enable();
-}
-static inline void __deprecated save_flags(unsigned long *x)
-{
- local_save_flags(*x);
-}
-#define save_flags(x) save_flags(&x);
-static inline void __deprecated restore_flags(unsigned long x)
-{
- local_irq_restore(x);
-}
-
-static inline void __deprecated save_and_cli(unsigned long *x)
-{
- local_irq_save(*x);
-}
-#define save_and_cli(x) save_and_cli(&x)
-#endif /* CONFIG_SMP */
-
-#ifndef XEN
-/* SoftIRQ primitives. */
-#define local_bh_disable() \
- do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
- do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
-
-extern void local_bh_enable(void);
-#endif
-
-/* Please avoid allocating new softirqs unless you really need very
-   high-frequency threaded job scheduling.  For almost all purposes,
-   tasklets are more than enough; e.g. all serial device BHs et al.
-   should be converted to tasklets, not to softirqs.
- */
-
-#ifndef XEN
-enum
-{
- HI_SOFTIRQ=0,
- TIMER_SOFTIRQ,
- NET_TX_SOFTIRQ,
- NET_RX_SOFTIRQ,
- SCSI_SOFTIRQ,
- TASKLET_SOFTIRQ
-};
-#endif
-
-/* softirq mask and active fields moved to irq_cpustat_t in
- * asm/hardirq.h to get better cache usage. KAO
- */
-
-struct softirq_action
-{
- void (*action)(struct softirq_action *);
- void *data;
-};
-
-asmlinkage void do_softirq(void);
-#ifndef XEN
-extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
-#endif
-extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
-extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
-#ifndef XEN
-extern void FASTCALL(raise_softirq(unsigned int nr));
-#endif
-
-
-#ifndef XEN
-/* Tasklets --- multithreaded analogue of BHs.
-
-   The main feature distinguishing them from generic softirqs: a given
-   tasklet runs on only one CPU at a time.
-
-   The main feature distinguishing them from BHs: different tasklets
-   may run simultaneously on different CPUs.
-
-   Properties:
-   * If tasklet_schedule() is called, the tasklet is guaranteed to be
-     executed on some cpu at least once afterwards.
-   * If the tasklet is already scheduled, but its execution has not yet
-     started, it will be executed only once.
-   * If the tasklet is already running on another CPU (or schedule is
-     called from the tasklet itself), it is rescheduled for later.
-   * A tasklet is strictly serialized with respect to itself, but not
-     with respect to other tasklets.  If a client needs inter-task
-     synchronization, it must provide it with spinlocks.
- */
-
-struct tasklet_struct
-{
- struct tasklet_struct *next;
- unsigned long state;
- atomic_t count;
- void (*func)(unsigned long);
- unsigned long data;
-};
-
-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
-
-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
-
-
-enum
-{
- TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
-};
-
-#ifdef CONFIG_SMP
-static inline int tasklet_trylock(struct tasklet_struct *t)
-{
- return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
-}
-
-static inline void tasklet_unlock(struct tasklet_struct *t)
-{
- smp_mb__before_clear_bit();
- clear_bit(TASKLET_STATE_RUN, &(t)->state);
-}
-
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
-#else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
-#endif
-
-extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
-
-static inline void tasklet_schedule(struct tasklet_struct *t)
-{
- if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
- __tasklet_schedule(t);
-}
-
-extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
-
-static inline void tasklet_hi_schedule(struct tasklet_struct *t)
-{
- if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
- __tasklet_hi_schedule(t);
-}
-
-
-static inline void tasklet_disable_nosync(struct tasklet_struct *t)
-{
- atomic_inc(&t->count);
- smp_mb__after_atomic_inc();
-}
-
-static inline void tasklet_disable(struct tasklet_struct *t)
-{
- tasklet_disable_nosync(t);
- tasklet_unlock_wait(t);
- smp_mb();
-}
-
-static inline void tasklet_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- atomic_dec(&t->count);
-}
-
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- atomic_dec(&t->count);
-}
-
-extern void tasklet_kill(struct tasklet_struct *t);
-extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
-extern void tasklet_init(struct tasklet_struct *t,
- void (*func)(unsigned long), unsigned long data);
-#endif
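A hedged driver-side sketch of the non-Xen tasklet API above (not compilable outside a Linux 2.6 driver; all names are hypothetical):

static void my_bottom_half(unsigned long data)
{
	/* softirq context; never runs concurrently with itself */
}
static DECLARE_TASKLET(my_tasklet, my_bottom_half, 0);

static irqreturn_t my_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	tasklet_schedule(&my_tasklet);	/* no-op if scheduled but not yet run */
	return IRQ_HANDLED;
}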
-
-/*
- * Autoprobing for irqs:
- *
- * probe_irq_on() and probe_irq_off() provide robust primitives
- * for accurate IRQ probing during kernel initialization. They are
- * reasonably simple to use, are not "fooled" by spurious interrupts,
- * and, unlike other attempts at IRQ probing, they do not get hung on
- * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
- *
- * For reasonably foolproof probing, use them as follows:
- *
- * 1. clear and/or mask the device's internal interrupt.
- * 2. sti();
- * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs
- * 4. enable the device and cause it to trigger an interrupt.
- * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
- * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple
- * 7. service the device to clear its pending interrupt.
- * 8. loop again if paranoia is required.
- *
- * probe_irq_on() returns a mask of allocated irq's.
- *
- * probe_irq_off() takes the mask as a parameter,
- * and returns the irq number which occurred,
- * or zero if none occurred, or a negative irq number
- * if more than one irq occurred.
- */
-
-#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
-static inline unsigned long probe_irq_on(void)
-{
- return 0;
-}
-static inline int probe_irq_off(unsigned long val)
-{
- return 0;
-}
-static inline unsigned int probe_irq_mask(unsigned long val)
-{
- return 0;
-}
-#else
-extern unsigned long probe_irq_on(void); /* returns 0 on failure */
-extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */
-extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */
-#endif
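The numbered recipe above, written out end to end; every function here is a stub standing in for the real API so the control flow can be run standalone:

#include <stdio.h>

static unsigned long probe_irq_on(void)      { return 1UL << 5; /* pretend IRQ 5 is idle */ }
static int probe_irq_off(unsigned long mask) { return mask ? 5 : 0; }
static void device_mask_interrupts(void)     { }	/* step 1 */
static void device_trigger_interrupt(void)   { }	/* step 4 */
static void device_ack_interrupt(void)       { }	/* step 7 */

int main(void)
{
	device_mask_interrupts();
	unsigned long irqs = probe_irq_on();	/* step 3; step 2, sti(), elided */
	device_trigger_interrupt();
	/* step 5: a real driver polls or delays here */
	int irq = probe_irq_off(irqs);		/* step 6: 0 = none, <0 = multiple */
	device_ack_interrupt();
	printf("probed irq: %d\n", irq);
	return 0;
}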
-
-#endif
diff --git a/xen/include/asm-ia64/linux-xen/linux/kobject.h b/xen/include/asm-ia64/linux-xen/linux/kobject.h
deleted file mode 100644
index 9ecd705d22..0000000000
--- a/xen/include/asm-ia64/linux-xen/linux/kobject.h
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * kobject.h - generic kernel object infrastructure.
- *
- * Copyright (c) 2002-2003 Patrick Mochel
- * Copyright (c) 2002-2003 Open Source Development Labs
- *
- * This file is released under the GPLv2.
- *
- *
- * Please read Documentation/kobject.txt before using the kobject
- * interface, ESPECIALLY the parts about reference counts and object
- * destructors.
- */
-
-#ifndef _KOBJECT_H_
-#define _KOBJECT_H_
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/sysfs.h>
-#include <linux/compiler.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/kref.h>
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <asm/atomic.h>
-
-#define KOBJ_NAME_LEN 20
-#define UEVENT_HELPER_PATH_LEN 256
-
-/* path to the userspace helper executed on an event */
-extern char uevent_helper[];
-
-/* counter to tag the uevent, read only except for the kobject core */
-extern u64 uevent_seqnum;
-
-/* the actions here must match the proper string in lib/kobject_uevent.c */
-typedef int __bitwise kobject_action_t;
-enum kobject_action {
- KOBJ_ADD = (__force kobject_action_t) 0x01, /* exclusive to core */
- KOBJ_REMOVE = (__force kobject_action_t) 0x02, /* exclusive to core */
- KOBJ_CHANGE = (__force kobject_action_t) 0x03, /* device state change */
- KOBJ_MOUNT = (__force kobject_action_t) 0x04, /* mount event for block devices (broken) */
- KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */
- KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */
- KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */
-};
-
-struct kobject {
- const char * k_name;
- char name[KOBJ_NAME_LEN];
- struct kref kref;
- struct list_head entry;
- struct kobject * parent;
- struct kset * kset;
- struct kobj_type * ktype;
- struct dentry * dentry;
-#ifndef XEN
- wait_queue_head_t poll;
-#endif
-};
-
-extern int kobject_set_name(struct kobject *, const char *, ...)
- __attribute__((format(printf,2,3)));
-
-static inline const char * kobject_name(const struct kobject * kobj)
-{
- return kobj->k_name;
-}
-
-extern void kobject_init(struct kobject *);
-extern void kobject_cleanup(struct kobject *);
-
-extern int __must_check kobject_add(struct kobject *);
-extern void kobject_del(struct kobject *);
-
-extern int __must_check kobject_rename(struct kobject *, const char *new_name);
-
-extern int __must_check kobject_register(struct kobject *);
-extern void kobject_unregister(struct kobject *);
-
-extern struct kobject * kobject_get(struct kobject *);
-extern void kobject_put(struct kobject *);
-
-extern struct kobject *kobject_add_dir(struct kobject *, const char *);
-
-extern char * kobject_get_path(struct kobject *, gfp_t);
-
-struct kobj_type {
- void (*release)(struct kobject *);
- struct sysfs_ops * sysfs_ops;
- struct attribute ** default_attrs;
-};
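A hedged sketch of the lifecycle the declarations above imply: every kobject_get() is paired with a kobject_put(), and the ktype release() runs when the last reference drops. my_obj and my_release are hypothetical; kfree() comes from linux/slab.h:

struct my_obj {
	struct kobject kobj;
	int payload;
};

static void my_release(struct kobject *kobj)
{
	/* called when the final kobject_put() drops the refcount to zero */
	kfree(container_of(kobj, struct my_obj, kobj));
}

static struct kobj_type my_ktype = {
	.release = my_release,
};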
-
-
-/**
- * kset - a set of kobjects of a specific type, belonging
- * to a specific subsystem.
- *
- * All kobjects of a kset should be embedded in an identical
- * type. This type may have a descriptor, which the kset points
- * to. This allows there to exist sets of objects of the same
- * type in different subsystems.
- *
- * A subsystem does not have to be a list of only one type
- * of object; multiple ksets can belong to one subsystem. All
- * ksets of a subsystem share the subsystem's lock.
- *
- * Each kset can support specific event variables; it can
- * suppress the event generation or add subsystem-specific
- * variables carried with the event.
- */
-struct kset_uevent_ops {
- int (*filter)(struct kset *kset, struct kobject *kobj);
- const char *(*name)(struct kset *kset, struct kobject *kobj);
- int (*uevent)(struct kset *kset, struct kobject *kobj, char **envp,
- int num_envp, char *buffer, int buffer_size);
-};
-
-struct kset {
- struct subsystem * subsys;
- struct kobj_type * ktype;
- struct list_head list;
- spinlock_t list_lock;
- struct kobject kobj;
- struct kset_uevent_ops * uevent_ops;
-};
-
-
-extern void kset_init(struct kset * k);
-extern int __must_check kset_add(struct kset * k);
-extern int __must_check kset_register(struct kset * k);
-extern void kset_unregister(struct kset * k);
-
-static inline struct kset * to_kset(struct kobject * kobj)
-{
- return kobj ? container_of(kobj,struct kset,kobj) : NULL;
-}
-
-static inline struct kset * kset_get(struct kset * k)
-{
- return k ? to_kset(kobject_get(&k->kobj)) : NULL;
-}
-
-static inline void kset_put(struct kset * k)
-{
- kobject_put(&k->kobj);
-}
-
-static inline struct kobj_type * get_ktype(struct kobject * k)
-{
- if (k->kset && k->kset->ktype)
- return k->kset->ktype;
- else
- return k->ktype;
-}
-
-extern struct kobject * kset_find_obj(struct kset *, const char *);
-
-
-/**
- * Use this when initializing an embedded kset with no other
- * fields to initialize.
- */
-#define set_kset_name(str) .kset = { .kobj = { .name = str } }
-
-
-
-struct subsystem {
- struct kset kset;
-#ifndef XEN
- struct rw_semaphore rwsem;
-#endif
-};
-
-#define decl_subsys(_name,_type,_uevent_ops) \
-struct subsystem _name##_subsys = { \
- .kset = { \
- .kobj = { .name = __stringify(_name) }, \
- .ktype = _type, \
- .uevent_ops =_uevent_ops, \
- } \
-}
-#define decl_subsys_name(_varname,_name,_type,_uevent_ops) \
-struct subsystem _varname##_subsys = { \
- .kset = { \
- .kobj = { .name = __stringify(_name) }, \
- .ktype = _type, \
- .uevent_ops =_uevent_ops, \
- } \
-}
-
-/* The global /sys/kernel/ subsystem for people to chain off of */
-extern struct subsystem kernel_subsys;
-/* The global /sys/hypervisor/ subsystem */
-extern struct subsystem hypervisor_subsys;
-
-/**
- * Helpers for setting the kset of registered objects.
- * Often, a registered object belongs to a kset embedded in a
- * subsystem. These do no magic, just make the resulting code
- * easier to follow.
- */
-
-/**
- * kobj_set_kset_s(obj,subsys) - set kset for embedded kobject.
- * @obj: ptr to some object type.
- * @subsys: a subsystem object (not a ptr).
- *
- * Can be used for any object type with an embedded ->kobj.
- */
-
-#define kobj_set_kset_s(obj,subsys) \
- (obj)->kobj.kset = &(subsys).kset
-
-/**
- * kset_set_kset_s(obj,subsys) - set kset for embedded kset.
- * @obj: ptr to some object type.
- * @subsys: a subsystem object (not a ptr).
- *
- * Can be used for any object type with an embedded ->kset.
- * Sets the kset of @obj's embedded kobject (via its embedded
- * kset) to @subsys.kset. This makes @obj a member of that
- * kset.
- */
-
-#define kset_set_kset_s(obj,subsys) \
- (obj)->kset.kobj.kset = &(subsys).kset
-
-/**
- * subsys_set_kset(obj,subsys) - set kset for subsystem
- * @obj: ptr to some object type.
- * @subsys: a subsystem object (not a ptr).
- *
- * Can be used for any object type with an embedded ->subsys.
- * Sets the kset of @obj's kobject to @subsys.kset. This makes
- * the object a member of that kset.
- */
-
-#define subsys_set_kset(obj,_subsys) \
- (obj)->subsys.kset.kobj.kset = &(_subsys).kset
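A hedged sketch combining the declaration and helper macros above; example_subsys and my_obj are hypothetical, and my_ktype is the release-carrying ktype sketched earlier:

/* declares "struct subsystem example_subsys"; its kobject is named "example" */
decl_subsys(example, &my_ktype, NULL);

static void attach_my_device(struct my_obj *my_device)
{
	/* join the subsystem's kset before registering the kobject */
	kobj_set_kset_s(my_device, example_subsys);
}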
-
-extern void subsystem_init(struct subsystem *);
-extern int __must_check subsystem_register(struct subsystem *);
-extern void subsystem_unregister(struct subsystem *);
-
-static inline struct subsystem * subsys_get(struct subsystem * s)
-{
- return s ? container_of(kset_get(&s->kset),struct subsystem,kset) : NULL;
-}
-
-static inline void subsys_put(struct subsystem * s)
-{
- kset_put(&s->kset);
-}
-
-struct subsys_attribute {
-#ifndef XEN
- struct attribute attr;
-#endif
- ssize_t (*show)(struct subsystem *, char *);
- ssize_t (*store)(struct subsystem *, const char *, size_t);
-};
-
-extern int __must_check subsys_create_file(struct subsystem * ,
- struct subsys_attribute *);
-
-#if defined(CONFIG_HOTPLUG)
-void kobject_uevent(struct kobject *kobj, enum kobject_action action);
-
-int add_uevent_var(char **envp, int num_envp, int *cur_index,
- char *buffer, int buffer_size, int *cur_len,
- const char *format, ...)
- __attribute__((format (printf, 7, 8)));
-#else
-static inline void kobject_uevent(struct kobject *kobj, enum kobject_action action) { }
-
-static inline int add_uevent_var(char **envp, int num_envp, int *cur_index,
- char *buffer, int buffer_size, int *cur_len,
- const char *format, ...)
-{ return 0; }
-#endif
-
-#endif /* __KERNEL__ */
-#endif /* _KOBJECT_H_ */
diff --git a/xen/include/asm-ia64/linux-xen/linux/linux-pci.h b/xen/include/asm-ia64/linux-xen/linux/linux-pci.h
deleted file mode 100644
index e2cac1b850..0000000000
--- a/xen/include/asm-ia64/linux-xen/linux/linux-pci.h
+++ /dev/null
@@ -1,836 +0,0 @@
-/*
- * pci.h
- *
- * PCI defines and function prototypes
- * Copyright 1994, Drew Eckhardt
- * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
- *
- * For more information, please consult the following manuals (look at
- * http://www.pcisig.com/ for how to get them):
- *
- * PCI BIOS Specification
- * PCI Local Bus Specification
- * PCI to PCI Bridge Specification
- * PCI System Design Guide
- */
-
-#ifndef LINUX_PCI_H
-#define LINUX_PCI_H
-
-/* Include the pci register defines */
-#include <linux/pci_regs.h>
-
-/* Include the ID list */
-#include <linux/pci_ids.h>
-#ifdef XEN
-#include <asm/processor.h>
-#endif
-
-/*
- * The PCI interface treats multi-function devices as independent
- * devices. The slot/function address of each device is encoded
- * in a single byte as follows:
- *
- * 7:3 = slot
- * 2:0 = function
- */
-
-#ifndef XEN
-#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
-#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
-#define PCI_FUNC(devfn) ((devfn) & 0x07)
-#endif
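The 7:3/2:0 split above, checked standalone (the three macros are re-stated locally for illustration):

#include <assert.h>

#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)      (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)      ((devfn) & 0x07)

int main(void)
{
	unsigned int devfn = PCI_DEVFN(3, 2);	/* slot 3, function 2 */

	assert(devfn == 0x1a);
	assert(PCI_SLOT(devfn) == 3 && PCI_FUNC(devfn) == 2);
	return 0;
}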
-
-/* Ioctls for /proc/bus/pci/X/Y nodes. */
-#define PCIIOC_BASE ('P' << 24 | 'C' << 16 | 'I' << 8)
-#define PCIIOC_CONTROLLER (PCIIOC_BASE | 0x00) /* Get controller for PCI device. */
-#define PCIIOC_MMAP_IS_IO (PCIIOC_BASE | 0x01) /* Set mmap state to I/O space. */
-#define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */
-#define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */
-
-#ifdef __KERNEL__
-
-#include <linux/mod_devicetable.h>
-
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/list.h>
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-
-/* File state for mmap()s on /proc/bus/pci/X/Y */
-enum pci_mmap_state {
- pci_mmap_io,
- pci_mmap_mem
-};
-
-/* This defines the direction arg to the DMA mapping routines. */
-#define PCI_DMA_BIDIRECTIONAL 0
-#define PCI_DMA_TODEVICE 1
-#define PCI_DMA_FROMDEVICE 2
-#define PCI_DMA_NONE 3
-
-#define DEVICE_COUNT_COMPATIBLE 4
-#define DEVICE_COUNT_RESOURCE 12
-
-typedef int __bitwise pci_power_t;
-
-#define PCI_D0 ((pci_power_t __force) 0)
-#define PCI_D1 ((pci_power_t __force) 1)
-#define PCI_D2 ((pci_power_t __force) 2)
-#define PCI_D3hot ((pci_power_t __force) 3)
-#define PCI_D3cold ((pci_power_t __force) 4)
-#define PCI_UNKNOWN ((pci_power_t __force) 5)
-#define PCI_POWER_ERROR ((pci_power_t __force) -1)
-
-/** The pci_channel state describes connectivity between the CPU and
- * the pci device. If some PCI bus between here and the pci device
- * has crashed or locked up, this info is reflected here.
- */
-typedef unsigned int __bitwise pci_channel_state_t;
-
-enum pci_channel_state {
- /* I/O channel is in normal state */
- pci_channel_io_normal = (__force pci_channel_state_t) 1,
-
- /* I/O to channel is blocked */
- pci_channel_io_frozen = (__force pci_channel_state_t) 2,
-
- /* PCI card is dead */
- pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
-};
-
-typedef unsigned short __bitwise pci_bus_flags_t;
-enum pci_bus_flags {
- PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
-};
-
-struct pci_cap_saved_state {
- struct hlist_node next;
- char cap_nr;
- u32 data[0];
-};
-
-/*
- * The pci_dev structure is used to describe PCI devices.
- */
-#ifdef XEN
-struct sn_pci_dev {
-#else
-struct pci_dev {
-#endif
- struct list_head global_list; /* node in list of all PCI devices */
- struct list_head bus_list; /* node in per-bus list */
- struct pci_bus *bus; /* bus this device is on */
- struct pci_bus *subordinate; /* bus this device bridges to */
-
- void *sysdata; /* hook for sys-specific extension */
- struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
-
- unsigned int devfn; /* encoded device & function index */
- unsigned short vendor;
- unsigned short device;
- unsigned short subsystem_vendor;
- unsigned short subsystem_device;
- unsigned int class; /* 3 bytes: (base,sub,prog-if) */
- u8 hdr_type; /* PCI header type (`multi' flag masked out) */
- u8 rom_base_reg; /* which config register controls the ROM */
- u8 pin; /* which interrupt pin this device uses */
-
- struct pci_driver *driver; /* which driver has allocated this device */
- u64 dma_mask; /* Mask of the bits of bus address this
- device implements. Normally this is
- 0xffffffff. You only need to change
- this if your device has broken DMA
- or supports 64-bit transfers. */
-
- pci_power_t current_state; /* Current operating state. In ACPI-speak,
- this is D0-D3, D0 being fully functional,
- and D3 being off. */
-
- pci_channel_state_t error_state; /* current connectivity state */
- struct device dev; /* Generic device interface */
-
- /* device is compatible with these IDs */
- unsigned short vendor_compatible[DEVICE_COUNT_COMPATIBLE];
- unsigned short device_compatible[DEVICE_COUNT_COMPATIBLE];
-
- int cfg_size; /* Size of configuration space */
-
- /*
- * Instead of touching interrupt line and base address registers
- * directly, use the values stored here. They might be different!
- */
- unsigned int irq;
- struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
-
- /* These fields are used by common fixups */
- unsigned int transparent:1; /* Transparent PCI bridge */
- unsigned int multifunction:1;/* Part of multi-function device */
- /* keep track of device state */
- unsigned int is_enabled:1; /* pci_enable_device has been called */
- unsigned int is_busmaster:1; /* device is busmaster */
- unsigned int no_msi:1; /* device may not use msi */
- unsigned int no_d1d2:1; /* only allow d0 or d3 */
- unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
- unsigned int broken_parity_status:1; /* Device generates false positive parity */
- unsigned int msi_enabled:1;
- unsigned int msix_enabled:1;
-
- u32 saved_config_space[16]; /* config space saved at suspend time */
- struct hlist_head saved_cap_space;
- struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
- int rom_attr_enabled; /* has display of the rom attribute been enabled? */
- struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
-};
-
-#ifndef XEN
-#define pci_dev_g(n) list_entry(n, struct pci_dev, global_list)
-#define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list)
-#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
-#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
-
-static inline struct pci_cap_saved_state *pci_find_saved_cap(
- struct pci_dev *pci_dev,char cap)
-{
- struct pci_cap_saved_state *tmp;
- struct hlist_node *pos;
-
- hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
- if (tmp->cap_nr == cap)
- return tmp;
- }
- return NULL;
-}
-
-static inline void pci_add_saved_cap(struct pci_dev *pci_dev,
- struct pci_cap_saved_state *new_cap)
-{
- hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
-}
-
-static inline void pci_remove_saved_cap(struct pci_cap_saved_state *cap)
-{
- hlist_del(&cap->next);
-}
-#endif
-
-/*
- * For PCI devices, the region numbers are assigned this way:
- *
- * 0-5 standard PCI regions
- * 6 expansion ROM
- * 7-10 bridges: address space assigned to buses behind the bridge
- */
-
-#define PCI_ROM_RESOURCE 6
-#define PCI_BRIDGE_RESOURCES 7
-#define PCI_NUM_RESOURCES 11
-
-#ifndef PCI_BUS_NUM_RESOURCES
-#define PCI_BUS_NUM_RESOURCES 8
-#endif
-
-#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
-
-struct pci_bus {
- struct list_head node; /* node in list of buses */
- struct pci_bus *parent; /* parent bus this bridge is on */
- struct list_head children; /* list of child buses */
- struct list_head devices; /* list of devices on this bus */
-#ifdef XEN
- struct sn_pci_dev *self; /* bridge device as seen by parent */
-#else
- struct pci_dev *self; /* bridge device as seen by parent */
-#endif
- struct resource *resource[PCI_BUS_NUM_RESOURCES];
- /* address space routed to this bus */
-
- struct pci_ops *ops; /* configuration access functions */
- void *sysdata; /* hook for sys-specific extension */
- struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
-
- unsigned char number; /* bus number */
- unsigned char primary; /* number of primary bridge */
- unsigned char secondary; /* number of secondary bridge */
- unsigned char subordinate; /* max number of subordinate buses */
-
- char name[48];
-
- unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
- pci_bus_flags_t bus_flags; /* Inherited by child busses */
- struct device *bridge;
- struct class_device class_dev;
- struct bin_attribute *legacy_io; /* legacy I/O for this bus */
- struct bin_attribute *legacy_mem; /* legacy mem */
-};
-
-#define pci_bus_b(n) list_entry(n, struct pci_bus, node)
-#define to_pci_bus(n) container_of(n, struct pci_bus, class_dev)
-
-/*
- * Error values that may be returned by PCI functions.
- */
-#define PCIBIOS_SUCCESSFUL 0x00
-#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
-#define PCIBIOS_BAD_VENDOR_ID 0x83
-#define PCIBIOS_DEVICE_NOT_FOUND 0x86
-#define PCIBIOS_BAD_REGISTER_NUMBER 0x87
-#define PCIBIOS_SET_FAILED 0x88
-#define PCIBIOS_BUFFER_TOO_SMALL 0x89
-
-/* Low-level architecture-dependent routines */
-
-struct pci_ops {
- int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
- int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
-};
-
-struct pci_raw_ops {
- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
- int reg, int len, u32 *val);
- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
- int reg, int len, u32 val);
-};
-
-extern struct pci_raw_ops *raw_pci_ops;
-
-struct pci_bus_region {
- unsigned long start;
- unsigned long end;
-};
-
-struct pci_dynids {
- spinlock_t lock; /* protects list, index */
- struct list_head list; /* for IDs added at runtime */
- unsigned int use_driver_data:1; /* pci_driver->driver_data is used */
-};
-
-/* ---------------------------------------------------------------- */
-/** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
- * a set of callbacks in struct pci_error_handlers, then that device driver
- * will be notified of PCI bus errors, and will be driven to recovery
- * when an error occurs.
- */
-
-typedef unsigned int __bitwise pci_ers_result_t;
-
-enum pci_ers_result {
- /* no result/none/not supported in device driver */
- PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
-
- /* Device driver can recover without slot reset */
- PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
-
- /* Device driver wants slot to be reset. */
- PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
-
- /* Device has completely failed, is unrecoverable */
- PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
-
- /* Device driver is fully recovered and operational */
- PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
-};
-
-/* PCI bus error event callbacks */
-struct pci_error_handlers
-{
- /* PCI bus error detected on this device */
- pci_ers_result_t (*error_detected)(struct pci_dev *dev,
- enum pci_channel_state error);
-
- /* MMIO has been re-enabled, but not DMA */
- pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
-
- /* PCI Express link has been reset */
- pci_ers_result_t (*link_reset)(struct pci_dev *dev);
-
- /* PCI slot has been reset */
- pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
-
- /* Device driver may resume normal operations */
- void (*resume)(struct pci_dev *dev);
-};
-
-/* ---------------------------------------------------------------- */
-#ifndef XEN
-struct module;
-struct pci_driver {
- struct list_head node;
- char *name;
- const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
- int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
- void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
- int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
- int (*suspend_late) (struct pci_dev *dev, pm_message_t state);
- int (*resume_early) (struct pci_dev *dev);
- int (*resume) (struct pci_dev *dev); /* Device woken up */
- int (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable); /* Enable wake event */
- void (*shutdown) (struct pci_dev *dev);
-
- struct pci_error_handlers *err_handler;
- struct device_driver driver;
- struct pci_dynids dynids;
-
- int multithread_probe;
-};
-
-#define to_pci_driver(drv) container_of(drv,struct pci_driver, driver)
-
-/**
- * PCI_DEVICE - macro used to describe a specific pci device
- * @vend: the 16 bit PCI Vendor ID
- * @dev: the 16 bit PCI Device ID
- *
- * This macro is used to create a struct pci_device_id that matches a
- * specific device. The subvendor and subdevice fields will be set to
- * PCI_ANY_ID.
- */
-#define PCI_DEVICE(vend,dev) \
- .vendor = (vend), .device = (dev), \
- .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
-
-/**
- * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
- * @dev_class: the class, subclass, prog-if triple for this device
- * @dev_class_mask: the class mask for this device
- *
- * This macro is used to create a struct pci_device_id that matches a
- * specific PCI class. The vendor, device, subvendor, and subdevice
- * fields will be set to PCI_ANY_ID.
- */
-#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
- .class = (dev_class), .class_mask = (dev_class_mask), \
- .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
- .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
-
-/*
- * pci_module_init is obsolete; this stays here until we fix up all usages
- * of it in the tree.
- */
-#define pci_module_init pci_register_driver
-
-/* these external functions are only available when PCI support is enabled */
-#ifdef CONFIG_PCI
-
-extern struct bus_type pci_bus_type;
-
-/* Do NOT directly access these two variables, unless you are arch-specific
- * PCI code or PCI core code. */
-extern struct list_head pci_root_buses; /* list of all known PCI buses */
-extern struct list_head pci_devices; /* list of all devices */
-
-void pcibios_fixup_bus(struct pci_bus *);
-int __must_check pcibios_enable_device(struct pci_dev *, int mask);
-char *pcibios_setup (char *str);
-
-/* Used only when drivers/pci/setup.c is used */
-void pcibios_align_resource(void *, struct resource *, resource_size_t,
- resource_size_t);
-void pcibios_update_irq(struct pci_dev *, int irq);
-
-/* Generic PCI functions used internally */
-
-extern struct pci_bus *pci_find_bus(int domain, int busnr);
-void pci_bus_add_devices(struct pci_bus *bus);
-struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata);
-static inline struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata)
-{
- struct pci_bus *root_bus;
- root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata);
- if (root_bus)
- pci_bus_add_devices(root_bus);
- return root_bus;
-}
-struct pci_bus *pci_create_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata);
-struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr);
-int pci_scan_slot(struct pci_bus *bus, int devfn);
-struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn);
-void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
-unsigned int pci_scan_child_bus(struct pci_bus *bus);
-int __must_check pci_bus_add_device(struct pci_dev *dev);
-void pci_read_bridge_bases(struct pci_bus *child);
-struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res);
-int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
-extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
-extern void pci_dev_put(struct pci_dev *dev);
-extern void pci_remove_bus(struct pci_bus *b);
-extern void pci_remove_bus_device(struct pci_dev *dev);
-extern void pci_stop_bus_device(struct pci_dev *dev);
-void pci_setup_cardbus(struct pci_bus *bus);
-extern void pci_sort_breadthfirst(void);
-
-/* Generic PCI functions exported to card drivers */
-
-struct pci_dev *pci_find_device (unsigned int vendor, unsigned int device, const struct pci_dev *from);
-struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int device, const struct pci_dev *from);
-struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn);
-int pci_find_capability (struct pci_dev *dev, int cap);
-int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap);
-int pci_find_ext_capability (struct pci_dev *dev, int cap);
-struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
-
-struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
- struct pci_dev *from);
-struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device,
- struct pci_dev *from);
-
-struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device,
- unsigned int ss_vendor, unsigned int ss_device,
- struct pci_dev *from);
-struct pci_dev *pci_get_slot (struct pci_bus *bus, unsigned int devfn);
-struct pci_dev *pci_get_bus_and_slot (unsigned int bus, unsigned int devfn);
-struct pci_dev *pci_get_class (unsigned int class, struct pci_dev *from);
-int pci_dev_present(const struct pci_device_id *ids);
-
-int pci_bus_read_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 *val);
-int pci_bus_read_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 *val);
-int pci_bus_read_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 *val);
-int pci_bus_write_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 val);
-int pci_bus_write_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 val);
-int pci_bus_write_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 val);
-
-static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
-{
- return pci_bus_read_config_byte (dev->bus, dev->devfn, where, val);
-}
-static inline int pci_read_config_word(struct pci_dev *dev, int where, u16 *val)
-{
- return pci_bus_read_config_word (dev->bus, dev->devfn, where, val);
-}
-static inline int pci_read_config_dword(struct pci_dev *dev, int where, u32 *val)
-{
- return pci_bus_read_config_dword (dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val)
-{
- return pci_bus_write_config_byte (dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_word(struct pci_dev *dev, int where, u16 val)
-{
- return pci_bus_write_config_word (dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val)
-{
- return pci_bus_write_config_dword (dev->bus, dev->devfn, where, val);
-}
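A hedged usage sketch of the wrappers above; dev is assumed valid, and PCI_VENDOR_ID comes from linux/pci_regs.h, included at the top of this header:

static void show_vendor(struct pci_dev *dev)
{
	u16 vendor;

	if (pci_read_config_word(dev, PCI_VENDOR_ID, &vendor) == PCIBIOS_SUCCESSFUL)
		printk("vendor %04x\n", vendor);
}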
-
-int __must_check pci_enable_device(struct pci_dev *dev);
-int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask);
-void pci_disable_device(struct pci_dev *dev);
-void pci_set_master(struct pci_dev *dev);
-#define HAVE_PCI_SET_MWI
-int __must_check pci_set_mwi(struct pci_dev *dev);
-void pci_clear_mwi(struct pci_dev *dev);
-void pci_intx(struct pci_dev *dev, int enable);
-int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
-int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
-void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
-int __must_check pci_assign_resource(struct pci_dev *dev, int i);
-int __must_check pci_assign_resource_fixed(struct pci_dev *dev, int i);
-void pci_restore_bars(struct pci_dev *dev);
-
-/* ROM control related routines */
-void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
-void __iomem __must_check *pci_map_rom_copy(struct pci_dev *pdev, size_t *size);
-void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
-void pci_remove_rom(struct pci_dev *pdev);
-
-/* Power management related routines */
-int pci_save_state(struct pci_dev *dev);
-int pci_restore_state(struct pci_dev *dev);
-int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
-pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
-
-/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
-void pci_bus_assign_resources(struct pci_bus *bus);
-void pci_bus_size_bridges(struct pci_bus *bus);
-int pci_claim_resource(struct pci_dev *, int);
-void pci_assign_unassigned_resources(void);
-void pdev_enable_device(struct pci_dev *);
-void pdev_sort_resources(struct pci_dev *, struct resource_list *);
-void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
- int (*)(struct pci_dev *, u8, u8));
-#define HAVE_PCI_REQ_REGIONS 2
-int __must_check pci_request_regions(struct pci_dev *, const char *);
-void pci_release_regions(struct pci_dev *);
-int __must_check pci_request_region(struct pci_dev *, int, const char *);
-void pci_release_region(struct pci_dev *, int);
-
-/* drivers/pci/bus.c */
-int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
- struct resource *res, resource_size_t size,
- resource_size_t align, resource_size_t min,
- unsigned int type_mask,
- void (*alignf)(void *, struct resource *,
- resource_size_t, resource_size_t),
- void *alignf_data);
-void pci_enable_bridges(struct pci_bus *bus);
-
-/* Proper probing supporting hot-pluggable devices */
-int __must_check __pci_register_driver(struct pci_driver *, struct module *);
-static inline int __must_check pci_register_driver(struct pci_driver *driver)
-{
- return __pci_register_driver(driver, THIS_MODULE);
-}
-
-void pci_unregister_driver(struct pci_driver *);
-void pci_remove_behind_bridge(struct pci_dev *);
-struct pci_driver *pci_dev_driver(const struct pci_dev *);
-const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev);
-const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev);
-int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass);
-
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
- void *userdata);
-int pci_cfg_space_size(struct pci_dev *dev);
-unsigned char pci_bus_max_busnr(struct pci_bus* bus);
-
-/* kmem_cache style wrapper around pci_alloc_consistent() */
-
-#include <linux/dmapool.h>
-
-#define pci_pool dma_pool
-#define pci_pool_create(name, pdev, size, align, allocation) \
- dma_pool_create(name, &pdev->dev, size, align, allocation)
-#define pci_pool_destroy(pool) dma_pool_destroy(pool)
-#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
-#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
-
-enum pci_dma_burst_strategy {
- PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
- strategy_parameter is N/A */
- PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
- byte boundaries */
- PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
- strategy_parameter byte boundaries */
-};
-
-#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
-extern struct pci_dev *isa_bridge;
-#endif
-
-struct msix_entry {
- u16 vector; /* kernel uses to write allocated vector */
- u16 entry; /* driver uses to specify entry, OS writes */
-};
-
-
-#ifndef CONFIG_PCI_MSI
-static inline void pci_scan_msi_device(struct pci_dev *dev) {}
-static inline int pci_enable_msi(struct pci_dev *dev) {return -1;}
-static inline void pci_disable_msi(struct pci_dev *dev) {}
-static inline int pci_enable_msix(struct pci_dev* dev,
- struct msix_entry *entries, int nvec) {return -1;}
-static inline void pci_disable_msix(struct pci_dev *dev) {}
-static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
-#else
-extern void pci_scan_msi_device(struct pci_dev *dev);
-extern int pci_enable_msi(struct pci_dev *dev);
-extern void pci_disable_msi(struct pci_dev *dev);
-extern int pci_enable_msix(struct pci_dev* dev,
- struct msix_entry *entries, int nvec);
-extern void pci_disable_msix(struct pci_dev *dev);
-extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
-#endif
-
-#ifdef CONFIG_HT_IRQ
-/* The functions a driver should call */
-int ht_create_irq(struct pci_dev *dev, int idx);
-void ht_destroy_irq(unsigned int irq);
-#endif /* CONFIG_HT_IRQ */
-
-extern void pci_block_user_cfg_access(struct pci_dev *dev);
-extern void pci_unblock_user_cfg_access(struct pci_dev *dev);
-
-/*
- * PCI domain support.  Sometimes called PCI segment (e.g. by ACPI),
- * a PCI domain is defined to be a set of PCI busses which share
- * configuration space.
- */
-#ifndef CONFIG_PCI_DOMAINS
-static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- return 0;
-}
-#endif
-
-#else /* CONFIG_PCI is not enabled */
-
-/*
- * If the system does not have PCI, clearly these return errors. Define
- * these as simple inline functions to avoid hair in drivers.
- */
-
-#define _PCI_NOP(o,s,t) \
- static inline int pci_##o##_config_##s (struct pci_dev *dev, int where, t val) \
- { return PCIBIOS_FUNC_NOT_SUPPORTED; }
-#define _PCI_NOP_ALL(o,x) _PCI_NOP(o,byte,u8 x) \
- _PCI_NOP(o,word,u16 x) \
- _PCI_NOP(o,dword,u32 x)
-_PCI_NOP_ALL(read, *)
-_PCI_NOP_ALL(write,)
-
-static inline struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from)
-{ return NULL; }
-
-static inline struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
-{ return NULL; }
-
-static inline struct pci_dev *pci_get_device(unsigned int vendor,
- unsigned int device, struct pci_dev *from)
-{ return NULL; }
-
-static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor,
- unsigned int device, struct pci_dev *from)
-{ return NULL; }
-
-static inline struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device,
-unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from)
-{ return NULL; }
-
-static inline struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
-{ return NULL; }
-
-#define pci_dev_present(ids) (0)
-#define pci_dev_put(dev) do { } while (0)
-
-static inline void pci_set_master(struct pci_dev *dev) { }
-static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
-static inline void pci_disable_device(struct pci_dev *dev) { }
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; }
-static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY;}
-static inline int __pci_register_driver(struct pci_driver *drv, struct module *owner) { return 0;}
-static inline int pci_register_driver(struct pci_driver *drv) { return 0;}
-static inline void pci_unregister_driver(struct pci_driver *drv) { }
-static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; }
-static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; }
-static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; }
-static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; }
-
-/* Power management related routines */
-static inline int pci_save_state(struct pci_dev *dev) { return 0; }
-static inline int pci_restore_state(struct pci_dev *dev) { return 0; }
-static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return 0; }
-static inline pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { return PCI_D0; }
-static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; }
-
-#define isa_bridge ((struct pci_dev *)NULL)
-
-#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
-
-static inline void pci_block_user_cfg_access(struct pci_dev *dev) { }
-static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) { }
-
-#endif /* CONFIG_PCI */
-
-/* Include architecture-dependent settings and functions */
-#endif
-
-#include <asm/pci.h>
-
-#ifndef XEN
-/* these helpers provide future and backwards compatibility
- * for accessing popular PCI BAR info */
-#define pci_resource_start(dev,bar) ((dev)->resource[(bar)].start)
-#define pci_resource_end(dev,bar) ((dev)->resource[(bar)].end)
-#define pci_resource_flags(dev,bar) ((dev)->resource[(bar)].flags)
-#define pci_resource_len(dev,bar) \
- ((pci_resource_start((dev),(bar)) == 0 && \
- pci_resource_end((dev),(bar)) == \
- pci_resource_start((dev),(bar))) ? 0 : \
- \
- (pci_resource_end((dev),(bar)) - \
- pci_resource_start((dev),(bar)) + 1))
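A hedged sketch of the BAR helpers above; dev is a hypothetical struct pci_dev, and ioremap() is the usual follow-up for a memory BAR:

static void __iomem *map_bar0(struct pci_dev *dev)
{
	unsigned long start = pci_resource_start(dev, 0);
	unsigned long len   = pci_resource_len(dev, 0);	/* 0 when BAR 0 is unset */

	return len ? ioremap(start, len) : NULL;
}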
-
-/* Similar to the helpers above, these manipulate per-pci_dev
- * driver-specific data. They are really just a wrapper around
- * the generic device structure functions of these calls.
- */
-static inline void *pci_get_drvdata (struct pci_dev *pdev)
-{
- return dev_get_drvdata(&pdev->dev);
-}
-
-static inline void pci_set_drvdata (struct pci_dev *pdev, void *data)
-{
- dev_set_drvdata(&pdev->dev, data);
-}
-
-/* If you want to know what to call your pci_dev, ask this function.
- * Again, it's a wrapper around the generic device.
- */
-static inline char *pci_name(struct pci_dev *pdev)
-{
- return pdev->dev.bus_id;
-}
-
-
-/* Some archs don't want to expose struct resource to userland as-is
- * in sysfs and /proc
- */
-#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
-static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc, resource_size_t *start,
- resource_size_t *end)
-{
- *start = rsrc->start;
- *end = rsrc->end;
-}
-#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
-
-
-/*
- * The world is not perfect and supplies us with broken PCI devices.
- * For at least some of these bugs we need a work-around, so both
- * generic (drivers/pci/quirks.c) and per-architecture code can define
- * fixup hooks to be called for particular buggy devices.
- */
-
-struct pci_fixup {
- u16 vendor, device; /* You can use PCI_ANY_ID here of course */
- void (*hook)(struct pci_dev *dev);
-};
-
-enum pci_fixup_pass {
- pci_fixup_early, /* Before probing BARs */
- pci_fixup_header, /* After reading configuration header */
- pci_fixup_final, /* Final phase of device fixups */
- pci_fixup_enable, /* pci_enable_device() time */
-};
-
-/* Anonymous variables would be nice... */
-#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \
- static const struct pci_fixup __pci_fixup_##name __attribute_used__ \
- __attribute__((__section__(#section))) = { vendor, device, hook };
-#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
- DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
- vendor##device##hook, vendor, device, hook)
-#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
- DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
- vendor##device##hook, vendor, device, hook)
-#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
- DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
- vendor##device##hook, vendor, device, hook)
-#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
- DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
- vendor##device##hook, vendor, device, hook)
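A hedged example of the fixup machinery above. The IDs are stand-ins; real callers use constants from linux/pci_ids.h, which also keeps the token-pasted variable name a valid identifier:

#define PCI_VENDOR_ID_EXAMPLE 0x1234	/* hypothetical */
#define PCI_DEVICE_ID_EXAMPLE 0x5678	/* hypothetical */

static void quirk_example(struct pci_dev *dev)
{
	dev->no_msi = 1;	/* pretend this part has broken MSI */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_EXAMPLE, PCI_DEVICE_ID_EXAMPLE,
			 quirk_example);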
-
-
-void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
-
-extern int pci_pci_problems;
-#endif
-#define PCIPCI_FAIL 1 /* No PCI PCI DMA */
-#define PCIPCI_TRITON 2
-#define PCIPCI_NATOMA 4
-#define PCIPCI_VIAETBF 8
-#define PCIPCI_VSFX 16
-#define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
-#define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
-
-#endif /* __KERNEL__ */
-#endif /* LINUX_PCI_H */
diff --git a/xen/include/asm-ia64/linux-xen/linux/oprofile.h b/xen/include/asm-ia64/linux-xen/linux/oprofile.h
deleted file mode 100644
index 559c4c38a9..0000000000
--- a/xen/include/asm-ia64/linux-xen/linux/oprofile.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * @file oprofile.h
- *
- * API for machine-specific interrupts to interface
- * to oprofile.
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef OPROFILE_H
-#define OPROFILE_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-struct super_block;
-struct dentry;
-struct file_operations;
-struct pt_regs;
-
-/* Operations structure to be filled in */
-struct oprofile_operations {
- /* create any necessary configuration files in the oprofile fs.
- * Optional. */
- int (*create_files)(struct super_block * sb, struct dentry * root);
- /* Do any necessary interrupt setup. Optional. */
- int (*setup)(void);
- /* Do any necessary interrupt shutdown. Optional. */
- void (*shutdown)(void);
- /* Start delivering interrupts. */
- int (*start)(void);
- /* Stop delivering interrupts. */
- void (*stop)(void);
- /* Initiate a stack backtrace. Optional. */
- void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
- /* CPU identification string. */
- char * cpu_type;
-};
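A hedged sketch of an architecture filling in the structure above from oprofile_arch_init(); the my_* callbacks are hypothetical, and the cpu_type string is one oprofile's userspace would recognize:

int oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->setup    = my_setup;	/* hypothetical callbacks */
	ops->shutdown = my_shutdown;
	ops->start    = my_start;
	ops->stop     = my_stop;
	ops->cpu_type = "ia64/itanium2";
	return 0;
}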
-
-/**
- * One-time initialisation. *ops must be set to a filled-in
- * operations structure. This is called even in timer interrupt
- * mode so an arch can set a backtrace callback.
- *
- * If an error occurs, the fields should be left untouched.
- */
-int oprofile_arch_init(struct oprofile_operations * ops);
-
-/**
- * One-time exit/cleanup for the arch.
- */
-void oprofile_arch_exit(void);
-
-/**
- * Add a sample. This may be called from any context. Pass
- * smp_processor_id() as cpu.
- */
-void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
-
-/* Use this instead when the PC value is not from the regs. Doesn't
- * backtrace. */
-void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event);
-
-/* add a backtrace entry, to be called from the ->backtrace callback */
-void oprofile_add_trace(unsigned long eip);
-
-
-/**
- * Create a file of the given name as a child of the given root, with
- * the specified file operations.
- */
-int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
- char const * name, struct file_operations * fops);
-
-int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
- char const * name, struct file_operations * fops, int perm);
-
-/** Create a file for read/write access to an unsigned long. */
-int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an unsigned long. */
-int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an atomic_t. */
-int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
- char const * name, atomic_t * val);
-
-/** create a directory */
-struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
- char const * name);
-
-/**
- * Write the given asciz string to the given user buffer @buf, updating *offset
- * appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Convert an unsigned long value into ASCII and copy it to the user buffer @buf,
- * updating *offset appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Read an ASCII string for a number from a userspace buffer and fill *val on success.
- * Returns 0 on success, < 0 on error.
- */
-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
-
-/** lock for read/write safety */
-extern spinlock_t oprofilefs_lock;
-
-#endif /* OPROFILE_H */
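A hedged sketch (not the actual Xen/ia64 backend) of how an architecture
fills in the operations structure from oprofile_arch_init(); the perfmon_*
helpers and the cpu_type string are hypothetical stand-ins:

    extern int perfmon_setup(void);      /* hypothetical arch helper */
    extern void perfmon_shutdown(void);  /* hypothetical arch helper */
    extern int perfmon_start(void);      /* hypothetical arch helper */
    extern void perfmon_stop(void);      /* hypothetical arch helper */

    int oprofile_arch_init(struct oprofile_operations *ops)
    {
        ops->setup    = perfmon_setup;
        ops->shutdown = perfmon_shutdown;
        ops->start    = perfmon_start;
        ops->stop     = perfmon_stop;
        ops->cpu_type = "ia64/itanium2"; /* illustrative identifier */
        return 0;                        /* success */
    }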
diff --git a/xen/include/asm-ia64/linux/README.origin b/xen/include/asm-ia64/linux/README.origin
deleted file mode 100644
index f6a853de91..0000000000
--- a/xen/include/asm-ia64/linux/README.origin
+++ /dev/null
@@ -1,35 +0,0 @@
-# Source files in this directory are identical copies of linux-2.6.13 files:
-#
-# NOTE: DO NOT commit changes to these files! If a file
-# needs to be changed, move it to ../linux-xen and follow
-# the instructions in the README there.
-
-bcd.h -> linux/include/linux/bcd.h
-bitmap.h -> linux/include/linux/bitmap.h
-bitops.h -> linux/include/linux/bitops.h
-hash.h -> linux/include/linux/hash.h
-initrd.h -> linux/include/linux/initrd.h
-jiffies.h -> linux/include/linux/jiffies.h
-kmalloc_sizes.h -> linux/include/linux/kmalloc_sizes.h
-linkage.h -> linux/include/linux/linkage.h
-notifier.h -> linux/include/linux/notifier.h
-percpu.h -> linux/include/linux/percpu.h
-preempt.h -> linux/include/linux/preempt.h
-seqlock.h -> linux/include/linux/seqlock.h
-stddef.h -> linux/include/linux/stddef.h
-thread_info.h -> linux/include/linux/thread_info.h
-time.h -> linux/include/linux/time.h
-timex.h -> linux/include/linux/timex.h
-topology.h -> linux/include/linux/topology.h
-wait.h -> linux/include/linux/wait.h
-
-# The files below are from Linux-2.6.19
-ioport.h -> linux/include/linux/ioport.h
-klist.h -> linux/include/linux/klist.h
-kref.h -> linux/include/linux/kref.h
-mod_devicetable.h -> linux/include/linux/mod_devicetable.h
-pci_ids.h -> linux/include/linux/pci_ids.h
-pci_regs.h -> linux/include/linux/pci_regs.h
-pm.h -> linux/include/linux/pm.h
-sysfs.h -> linux/include/linux/sysfs.h
-
diff --git a/xen/include/asm-ia64/linux/asm-generic/README.origin b/xen/include/asm-ia64/linux/asm-generic/README.origin
deleted file mode 100644
index 436f7c553f..0000000000
--- a/xen/include/asm-ia64/linux/asm-generic/README.origin
+++ /dev/null
@@ -1,15 +0,0 @@
-# Source files in this directory are identical copies of linux-2.6.13 files:
-#
-# NOTE: DO NOT commit changes to these files! If a file
-# needs to be changed, move it to ../linux-xen and follow
-# the instructions in the README there.
-
-div64.h -> linux/include/asm-generic/div64.h
-ide_iops.h -> linux/include/asm-generic/ide_iops.h
-iomap.h -> linux/include/asm-generic/iomap.h
-pci.h -> linux/include/asm-generic/pci.h
-pgtable.h -> linux/include/asm-generic/pgtable.h
-sections.h -> linux/include/asm-generic/sections.h
-topology.h -> linux/include/asm-generic/topology.h
-unaligned.h -> linux/include/asm-generic/unaligned.h
-vmlinux.lds.h -> linux/include/asm-generic/vmlinux.lds.h
diff --git a/xen/include/asm-ia64/linux/asm-generic/div64.h b/xen/include/asm-ia64/linux/asm-generic/div64.h
deleted file mode 100644
index 8f4e319334..0000000000
--- a/xen/include/asm-ia64/linux/asm-generic/div64.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _ASM_GENERIC_DIV64_H
-#define _ASM_GENERIC_DIV64_H
-/*
- * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
- * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
- *
- * The semantics of do_div() are:
- *
- * uint32_t do_div(uint64_t *n, uint32_t base)
- * {
- * uint32_t remainder = *n % base;
- * *n = *n / base;
- * return remainder;
- * }
- *
- * NOTE: macro parameter n is evaluated multiple times,
- * beware of side effects!
- */
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-
-#if BITS_PER_LONG == 64
-
-# define do_div(n,base) ({ \
- uint32_t __base = (base); \
- uint32_t __rem; \
- __rem = ((uint64_t)(n)) % __base; \
- (n) = ((uint64_t)(n)) / __base; \
- __rem; \
- })
-
-#elif BITS_PER_LONG == 32
-
-extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
-
-/* The unnecessary pointer compare is there
- * to check for type safety (n must be 64bit)
- */
-# define do_div(n,base) ({ \
- uint32_t __base = (base); \
- uint32_t __rem; \
- (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
- if (likely(((n) >> 32) == 0)) { \
- __rem = (uint32_t)(n) % __base; \
- (n) = (uint32_t)(n) / __base; \
- } else \
- __rem = __div64_32(&(n), __base); \
- __rem; \
- })
-
-#else /* BITS_PER_LONG == ?? */
-
-# error do_div() does not yet support the C64
-
-#endif /* BITS_PER_LONG */
-
-#endif /* _ASM_GENERIC_DIV64_H */
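A minimal userspace sketch of the do_div() contract documented above (divide
*n in place, return the 32-bit remainder); this is the plain-C semantics, not
the kernel macro:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
    {
        uint32_t rem = (uint32_t)(*n % base);
        *n /= base;
        return rem;
    }

    int main(void)
    {
        uint64_t ns = 1000000123ULL;
        uint32_t rem = do_div_sketch(&ns, 1000000000U);
        printf("%llu s + %u ns\n", (unsigned long long)ns, rem); /* 1 s + 123 ns */
        return 0;
    }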
diff --git a/xen/include/asm-ia64/linux/asm-generic/ide_iops.h b/xen/include/asm-ia64/linux/asm-generic/ide_iops.h
deleted file mode 100644
index 1b91d06819..0000000000
--- a/xen/include/asm-ia64/linux/asm-generic/ide_iops.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Generic I/O and MEMIO string operations. */
-
-#define __ide_insw insw
-#define __ide_insl insl
-#define __ide_outsw outsw
-#define __ide_outsl outsl
-
-static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u16 *)addr = readw(port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u32 *)addr = readl(port);
- addr += 4;
- }
-}
-
-static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- writew(*(u16 *)addr, port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
-{
- while (count--) {
- writel(*(u32 *)addr, port);
- addr += 4;
- }
-}
diff --git a/xen/include/asm-ia64/linux/asm-generic/iomap.h b/xen/include/asm-ia64/linux/asm-generic/iomap.h
deleted file mode 100644
index cde592fca4..0000000000
--- a/xen/include/asm-ia64/linux/asm-generic/iomap.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef __GENERIC_IO_H
-#define __GENERIC_IO_H
-
-#include <linux/linkage.h>
-#include <asm/byteorder.h>
-
-/*
- * These are the "generic" interfaces for doing new-style
- * memory-mapped or PIO accesses. Architectures may do
- * their own arch-optimized versions, these just act as
- * wrappers around the old-style IO register access functions:
- * read[bwl]/write[bwl]/in[bwl]/out[bwl]
- *
- * Don't include this directly, include it from <asm/io.h>.
- */
-
-/*
- * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
- * access or a MMIO access, these functions don't care. The info is
- * encoded in the hardware mapping set up by the mapping functions
- * (or the cookie itself, depending on implementation and hw).
- *
- * The generic routines just encode the PIO/MMIO as part of the
- * cookie, and coldly assume that the MMIO IO mappings are not
- * in the low address range. Architectures for which this is not
- * true can't use this generic implementation.
- */
-extern unsigned int fastcall ioread8(void __iomem *);
-extern unsigned int fastcall ioread16(void __iomem *);
-extern unsigned int fastcall ioread16be(void __iomem *);
-extern unsigned int fastcall ioread32(void __iomem *);
-extern unsigned int fastcall ioread32be(void __iomem *);
-
-extern void fastcall iowrite8(u8, void __iomem *);
-extern void fastcall iowrite16(u16, void __iomem *);
-extern void fastcall iowrite16be(u16, void __iomem *);
-extern void fastcall iowrite32(u32, void __iomem *);
-extern void fastcall iowrite32be(u32, void __iomem *);
-
-/*
- * "string" versions of the above. Note that they
- * use native byte ordering for the accesses (on
- * the assumption that IO and memory agree on a
- * byte order, and CPU byteorder is irrelevant).
- *
- * They do _not_ update the port address. If you
- * want MMIO that copies stuff laid out in MMIO
- * memory across multiple ports, use "memcpy_toio()"
- * and friends.
- */
-extern void fastcall ioread8_rep(void __iomem *port, void *buf, unsigned long count);
-extern void fastcall ioread16_rep(void __iomem *port, void *buf, unsigned long count);
-extern void fastcall ioread32_rep(void __iomem *port, void *buf, unsigned long count);
-
-extern void fastcall iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
-extern void fastcall iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
-extern void fastcall iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
-
-/* Create a virtual mapping cookie for an IO port range */
-extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
-extern void ioport_unmap(void __iomem *);
-
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-
-#endif
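A hedged sketch of a driver using the cookie API above: map BAR 0, read one
register, unmap. The register offset is illustrative; only pci_iomap(),
ioread32() and pci_iounmap() come from this header:

    static int probe_sketch(struct pci_dev *pdev)
    {
        void __iomem *regs = pci_iomap(pdev, 0, 0); /* 0 = map the whole BAR */
        u32 status;

        if (!regs)
            return -ENOMEM;
        status = ioread32(regs + 0x10); /* works for PIO or MMIO cookies */
        pci_iounmap(pdev, regs);
        return status ? 0 : -EIO;
    }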
diff --git a/xen/include/asm-ia64/linux/asm-generic/pci.h b/xen/include/asm-ia64/linux/asm-generic/pci.h
deleted file mode 100644
index ee1d8b5d81..0000000000
--- a/xen/include/asm-ia64/linux/asm-generic/pci.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * linux/include/asm-generic/pci.h
- *
- * Copyright (C) 2003 Russell King
- */
-#ifndef _ASM_GENERIC_PCI_H
-#define _ASM_GENERIC_PCI_H
-
-/**
- * pcibios_resource_to_bus - convert resource to PCI bus address
- * @dev: device which owns this resource
- * @region: converted bus-centric region (start,end)
- * @res: resource to convert
- *
- * Convert a resource to a PCI device bus address or bus window.
- */
-static inline void
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
- struct resource *res)
-{
- region->start = res->start;
- region->end = res->end;
-}
-
-static inline void
-pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
- struct pci_bus_region *region)
-{
- res->start = region->start;
- res->end = region->end;
-}
-
-#define pcibios_scan_all_fns(a, b) 0
-
-#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? 15 : 14;
-}
-#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
-
-#endif
diff --git a/xen/include/asm-ia64/linux/asm-generic/pgtable.h b/xen/include/asm-ia64/linux/asm-generic/pgtable.h
deleted file mode 100644
index f405935651..0000000000
--- a/xen/include/asm-ia64/linux/asm-generic/pgtable.h
+++ /dev/null
@@ -1,214 +0,0 @@
-#ifndef _ASM_GENERIC_PGTABLE_H
-#define _ASM_GENERIC_PGTABLE_H
-
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH
-/*
- * Establish a new mapping:
- * - flush the old one
- * - update the page tables
- * - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
- *
- * Note: the old pte is known to not be writable, so we don't need to
- * worry about dirty bits etc getting lost.
- */
-#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
- set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
-#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
- set_pte_atomic(__ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
-#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-/*
- * Largely same as above, but only sets the access flags (dirty,
- * accessed, and writable). Furthermore, we know it always gets set
- * to a "more permissive" setting, which allows most architectures
- * to optimize this.
- */
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do { \
- set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __address, __ptep) \
-({ \
- pte_t __pte = *(__ptep); \
- int r = 1; \
- if (!pte_young(__pte)) \
- r = 0; \
- else \
- set_pte_at((__vma)->vm_mm, (__address), \
- (__ptep), pte_mkold(__pte)); \
- r; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep) \
-({ \
- int __young; \
- __young = ptep_test_and_clear_young(__vma, __address, __ptep); \
- if (__young) \
- flush_tlb_page(__vma, __address); \
- __young; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \
-({ \
- pte_t __pte = *__ptep; \
- int r = 1; \
- if (!pte_dirty(__pte)) \
- r = 0; \
- else \
- set_pte_at((__vma)->vm_mm, (__address), (__ptep), \
- pte_mkclean(__pte)); \
- r; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
-({ \
- int __dirty; \
- __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
- if (__dirty) \
- flush_tlb_page(__vma, __address); \
- __dirty; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(__mm, __address, __ptep) \
-({ \
- pte_t __pte = *(__ptep); \
- pte_clear((__mm), (__address), (__ptep)); \
- __pte; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define ptep_clear_flush(__vma, __address, __ptep) \
-({ \
- pte_t __pte; \
- __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
- flush_tlb_page(__vma, __address); \
- __pte; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
-{
- pte_t old_pte = *ptep;
- set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) (pte_val(A) == pte_val(B))
-#endif
-
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define page_test_and_clear_dirty(page) (0)
-#define pte_maybe_dirty(pte) pte_dirty(pte)
-#else
-#define pte_maybe_dirty(pte) (1)
-#endif
-
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-#define page_test_and_clear_young(page) (0)
-#endif
-
-#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
-#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
-#endif
-
-#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
-#define lazy_mmu_prot_update(pte) do { } while (0)
-#endif
-
-/*
- * When walking page tables, get the address of the next boundary,
- * or the end address of the range if that comes earlier. Although no
- * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
- */
-
-#define pgd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-
-#ifndef pud_addr_end
-#define pud_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-#ifndef pmd_addr_end
-#define pmd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-#ifndef __ASSEMBLY__
-/*
- * When walking page tables, we usually want to skip any p?d_none entries;
- * and any p?d_bad entries - reporting the error before resetting to none.
- * Do the tests inline, but report and clear the bad entry in mm/memory.c.
- */
-void pgd_clear_bad(pgd_t *);
-void pud_clear_bad(pud_t *);
-void pmd_clear_bad(pmd_t *);
-
-static inline int pgd_none_or_clear_bad(pgd_t *pgd)
-{
- if (pgd_none(*pgd))
- return 1;
- if (unlikely(pgd_bad(*pgd))) {
- pgd_clear_bad(pgd);
- return 1;
- }
- return 0;
-}
-
-static inline int pud_none_or_clear_bad(pud_t *pud)
-{
- if (pud_none(*pud))
- return 1;
- if (unlikely(pud_bad(*pud))) {
- pud_clear_bad(pud);
- return 1;
- }
- return 0;
-}
-
-static inline int pmd_none_or_clear_bad(pmd_t *pmd)
-{
- if (pmd_none(*pmd))
- return 1;
- if (unlikely(pmd_bad(*pmd))) {
- pmd_clear_bad(pmd);
- return 1;
- }
- return 0;
-}
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_GENERIC_PGTABLE_H */
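A userspace sketch of the *_addr_end pattern above: step to the next
SIZE-aligned boundary, clamped to `end`; the (boundary - 1 < end - 1)
comparison is what keeps the test correct when the rounded-up boundary wraps
to 0. SIZE here is an illustrative stand-in for PMD_SIZE:

    #include <stdio.h>

    #define SIZE (1UL << 21)    /* stand-in for PMD_SIZE */
    #define MASK (~(SIZE - 1))  /* stand-in for PMD_MASK */

    static unsigned long addr_end(unsigned long addr, unsigned long end)
    {
        unsigned long boundary = (addr + SIZE) & MASK;
        return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
        unsigned long a = 0x123456UL, e = 0x400000UL;
        while (a != e) {
            unsigned long next = addr_end(a, e);
            printf("chunk [%#lx, %#lx)\n", a, next);
            a = next;
        }
        return 0;
    }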
diff --git a/xen/include/asm-ia64/linux/asm-generic/sections.h b/xen/include/asm-ia64/linux/asm-generic/sections.h
deleted file mode 100644
index 450eae22c3..0000000000
--- a/xen/include/asm-ia64/linux/asm-generic/sections.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _ASM_GENERIC_SECTIONS_H_
-#define _ASM_GENERIC_SECTIONS_H_
-
-/* References to section boundaries */
-
-extern char _text[], _stext[], _etext[];
-extern char _data[], _sdata[], _edata[];
-extern char __bss_start[], __bss_stop[];
-extern char __init_begin[], __init_end[];
-extern char _sinittext[], _einittext[];
-extern char _sextratext[] __attribute__((weak));
-extern char _eextratext[] __attribute__((weak));
-extern char _end[];
-extern char __per_cpu_start[], __per_cpu_end[];
-
-#endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/xen/include/asm-ia64/linux/asm-generic/topology.h b/xen/include/asm-ia64/linux/asm-generic/topology.h
deleted file mode 100644
index 5d9d70cd17..0000000000
--- a/xen/include/asm-ia64/linux/asm-generic/topology.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * linux/include/asm-generic/topology.h
- *
- * Written by: Matthew Dobson, IBM Corporation
- *
- * Copyright (C) 2002, IBM Corp.
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send feedback to <colpatch@us.ibm.com>
- */
-#ifndef _ASM_GENERIC_TOPOLOGY_H
-#define _ASM_GENERIC_TOPOLOGY_H
-
-/* Other architectures wishing to use this simple topology API should fill
- in the below functions as appropriate in their own <asm/topology.h> file. */
-#ifndef cpu_to_node
-#define cpu_to_node(cpu) (0)
-#endif
-#ifndef parent_node
-#define parent_node(node) (0)
-#endif
-#ifndef node_to_cpumask
-#define node_to_cpumask(node) (cpu_online_map)
-#endif
-#ifndef node_to_first_cpu
-#define node_to_first_cpu(node) (0)
-#endif
-#ifndef pcibus_to_node
-#define pcibus_to_node(node) (-1)
-#endif
-
-#ifndef pcibus_to_cpumask
-#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
- CPU_MASK_ALL : \
- node_to_cpumask(pcibus_to_node(bus)) \
- )
-#endif
-
-#endif /* _ASM_GENERIC_TOPOLOGY_H */
diff --git a/xen/include/asm-ia64/linux/asm-generic/unaligned.h b/xen/include/asm-ia64/linux/asm-generic/unaligned.h
deleted file mode 100644
index 6c90f0f36e..0000000000
--- a/xen/include/asm-ia64/linux/asm-generic/unaligned.h
+++ /dev/null
@@ -1,122 +0,0 @@
-#ifndef _ASM_GENERIC_UNALIGNED_H_
-#define _ASM_GENERIC_UNALIGNED_H_
-
-/*
- * For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents.
- *
- * This is based almost entirely upon Richard Henderson's
- * asm-alpha/unaligned.h implementation. Some comments were
- * taken from David Mosberger's asm-ia64/unaligned.h header.
- */
-
-#include <linux/types.h>
-
-/*
- * The main single-value unaligned transfer routines.
- */
-#define get_unaligned(ptr) \
- ((__typeof__(*(ptr)))__get_unaligned((ptr), sizeof(*(ptr))))
-#define put_unaligned(x,ptr) \
- __put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr)))
-
-/*
- * This function doesn't actually exist. The idea is that when
- * someone uses the macros below with an unsupported size (datatype),
- * the linker will alert us to the problem via an unresolved reference
- * error.
- */
-extern void bad_unaligned_access_length(void) __attribute__((noreturn));
-
-struct __una_u64 { __u64 x __attribute__((packed)); };
-struct __una_u32 { __u32 x __attribute__((packed)); };
-struct __una_u16 { __u16 x __attribute__((packed)); };
-
-/*
- * Elemental unaligned loads
- */
-
-static inline unsigned long __uldq(const __u64 *addr)
-{
- const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
- return ptr->x;
-}
-
-static inline unsigned long __uldl(const __u32 *addr)
-{
- const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
- return ptr->x;
-}
-
-static inline unsigned long __uldw(const __u16 *addr)
-{
- const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
- return ptr->x;
-}
-
-/*
- * Elemental unaligned stores
- */
-
-static inline void __ustq(__u64 val, __u64 *addr)
-{
- struct __una_u64 *ptr = (struct __una_u64 *) addr;
- ptr->x = val;
-}
-
-static inline void __ustl(__u32 val, __u32 *addr)
-{
- struct __una_u32 *ptr = (struct __una_u32 *) addr;
- ptr->x = val;
-}
-
-static inline void __ustw(__u16 val, __u16 *addr)
-{
- struct __una_u16 *ptr = (struct __una_u16 *) addr;
- ptr->x = val;
-}
-
-#define __get_unaligned(ptr, size) ({ \
- const void *__gu_p = ptr; \
- unsigned long val; \
- switch (size) { \
- case 1: \
- val = *(const __u8 *)__gu_p; \
- break; \
- case 2: \
- val = __uldw(__gu_p); \
- break; \
- case 4: \
- val = __uldl(__gu_p); \
- break; \
- case 8: \
- val = __uldq(__gu_p); \
- break; \
- default: \
- bad_unaligned_access_length(); \
- }; \
- val; \
-})
-
-#define __put_unaligned(val, ptr, size) \
-do { \
- void *__gu_p = ptr; \
- switch (size) { \
- case 1: \
- *(__u8 *)__gu_p = val; \
- break; \
- case 2: \
- __ustw(val, __gu_p); \
- break; \
- case 4: \
- __ustl(val, __gu_p); \
- break; \
- case 8: \
- __ustq(val, __gu_p); \
- break; \
- default: \
- bad_unaligned_access_length(); \
- }; \
-} while(0)
-
-#endif /* _ASM_GENERIC_UNALIGNED_H_ */
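A minimal userspace sketch of the packed-struct trick used above: the packed
member tells the compiler the address may be misaligned, so it emits a safe
(byte-wise or unaligned-capable) load instead of faulting:

    #include <stdint.h>
    #include <stdio.h>

    struct una_u32 { uint32_t x __attribute__((packed)); };

    int main(void)
    {
        unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };
        const struct una_u32 *p = (const struct una_u32 *)(buf + 1); /* odd address */
        printf("0x%08x\n", p->x); /* 0x12345678 on a little-endian machine */
        return 0;
    }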
diff --git a/xen/include/asm-ia64/linux/asm-generic/vmlinux.lds.h b/xen/include/asm-ia64/linux/asm-generic/vmlinux.lds.h
deleted file mode 100644
index b3bb326ae5..0000000000
--- a/xen/include/asm-ia64/linux/asm-generic/vmlinux.lds.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef LOAD_OFFSET
-#define LOAD_OFFSET 0
-#endif
-
-#ifndef VMLINUX_SYMBOL
-#define VMLINUX_SYMBOL(_sym_) _sym_
-#endif
-
-#define RODATA \
- .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
- *(.rodata) *(.rodata.*) \
- *(__vermagic) /* Kernel version magic */ \
- } \
- \
- .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
- *(.rodata1) \
- } \
- \
- /* PCI quirks */ \
- .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
- VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
- VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
- VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
- VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
- } \
- \
- /* Kernel symbol table: Normal symbols */ \
- __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(__ksymtab) \
- VMLINUX_SYMBOL(__stop___ksymtab) = .; \
- } \
- \
- /* Kernel symbol table: GPL-only symbols */ \
- __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(__ksymtab_gpl) \
- VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
- } \
- \
- /* Kernel symbol table: Normal symbols */ \
- __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(__kcrctab) \
- VMLINUX_SYMBOL(__stop___kcrctab) = .; \
- } \
- \
- /* Kernel symbol table: GPL-only symbols */ \
- __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(__kcrctab_gpl) \
- VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
- } \
- \
- /* Kernel symbol table: strings */ \
- __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
- *(__ksymtab_strings) \
- } \
- \
- /* Built-in module parameters. */ \
- __param : AT(ADDR(__param) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
- VMLINUX_SYMBOL(__stop___param) = .; \
- }
-
-#define SECURITY_INIT \
- .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
- VMLINUX_SYMBOL(__security_initcall_end) = .; \
- }
-
-#define SCHED_TEXT \
- VMLINUX_SYMBOL(__sched_text_start) = .; \
- *(.sched.text) \
- VMLINUX_SYMBOL(__sched_text_end) = .;
-
-#define LOCK_TEXT \
- VMLINUX_SYMBOL(__lock_text_start) = .; \
- *(.spinlock.text) \
- VMLINUX_SYMBOL(__lock_text_end) = .;
diff --git a/xen/include/asm-ia64/linux/asm/README.origin b/xen/include/asm-ia64/linux/asm/README.origin
deleted file mode 100644
index ae40a0abb8..0000000000
--- a/xen/include/asm-ia64/linux/asm/README.origin
+++ /dev/null
@@ -1,41 +0,0 @@
-# Source files in this directory are identical copies of linux-2.6.13 files:
-#
-# NOTE: DO NOT commit changes to these files! If a file
-# needs to be changed, move it to ../linux-xen and follow
-# the instructions in the README there.
-
-asmmacro.h -> linux/include/asm-ia64/asmmacro.h
-bitops.h -> linux/include/asm-ia64/bitops.h
-break.h -> linux/include/asm-ia64/break.h
-byteorder.h -> linux/include/asm-ia64/byteorder.h
-cacheflush.h -> linux/include/asm-ia64/cacheflush.h
-checksum.h -> linux/include/asm-ia64/checksum.h
-current.h -> linux/include/asm-ia64/current.h
-delay.h -> linux/include/asm-ia64/delay.h
-div64.h -> linux/include/asm-ia64/div64.h
-dma.h -> linux/include/asm-ia64/dma.h
-fpswa.h -> linux/include/asm-ia64/fpswa.h
-fpu.h -> linux/include/asm-ia64/fpu.h
-hdreg.h -> linux/include/asm-ia64/hdreg.h
-intrinsics.h -> linux/include/asm-ia64/intrinsics.h
-ioctl.h -> linux/include/asm-ia64/ioctl.h
-linkage.h -> linux/include/asm-ia64/linkage.h
-machvec_hpsim.h -> linux/include/asm-ia64/machvec_hpsim.h
-mca.h -> linux/include/asm-ia64/mca.h
-nodedata.h -> linux/include/asm-ia64/nodedata.h
-numnodes.h -> linux/include/asm-ia64/numnodes.h
-param.h -> linux/include/asm-ia64/param.h
-patch.h -> linux/include/asm-ia64/patch.h
-pci.h -> linux/include/asm-ia64/pci.h
-rse.h -> linux/include/asm-ia64/rse.h
-setup.h -> linux/include/asm-ia64/setup.h
-string.h -> linux/include/asm-ia64/string.h
-thread_info.h -> linux/include/asm-ia64/thread_info.h
-timex.h -> linux/include/asm-ia64/timex.h
-topology.h -> linux/include/asm-ia64/topology.h
-unaligned.h -> linux/include/asm-ia64/unaligned.h
-unistd.h -> linux/include/asm-ia64/unistd.h
-unwind.h -> linux/include/asm-ia64/unwind.h
-
-# The files below are from Linux-2.6.19
-machvec_init.h -> linux/include/asm-ia64/machvec_init.h
diff --git a/xen/include/asm-ia64/linux/asm/asmmacro.h b/xen/include/asm-ia64/linux/asm/asmmacro.h
deleted file mode 100644
index 77af457f4a..0000000000
--- a/xen/include/asm-ia64/linux/asm/asmmacro.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef _ASM_IA64_ASMMACRO_H
-#define _ASM_IA64_ASMMACRO_H
-
-/*
- * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/config.h>
-
-#define ENTRY(name) \
- .align 32; \
- .proc name; \
-name:
-
-#define ENTRY_MIN_ALIGN(name) \
- .align 16; \
- .proc name; \
-name:
-
-#define GLOBAL_ENTRY(name) \
- .global name; \
- ENTRY(name)
-
-#define END(name) \
- .endp name
-
-/*
- * Helper macros to make unwind directives more readable:
- */
-
-/* prologue_gr: */
-#define ASM_UNW_PRLG_RP 0x8
-#define ASM_UNW_PRLG_PFS 0x4
-#define ASM_UNW_PRLG_PSP 0x2
-#define ASM_UNW_PRLG_PR 0x1
-#define ASM_UNW_PRLG_GRSAVE(ninputs) (32+(ninputs))
-
-/*
- * Helper macros for accessing user memory.
- */
-
- .section "__ex_table", "a" // declare section & section attributes
- .previous
-
-# define EX(y,x...) \
- .xdata4 "__ex_table", 99f-., y-.; \
- [99:] x
-# define EXCLR(y,x...) \
- .xdata4 "__ex_table", 99f-., y-.+4; \
- [99:] x
-
-/*
- * Mark instructions that need a load of a virtual address patched to be
- * a load of a physical address. We use this either in critical performance
- * path (ivt.S - TLB miss processing) or in places where it might not be
- * safe to use a "tpa" instruction (mca_asm.S - error recovery).
- */
- .section ".data.patch.vtop", "a" // declare section & section attributes
- .previous
-
-#define LOAD_PHYSICAL(pr, reg, obj) \
-[1:](pr)movl reg = obj; \
- .xdata4 ".data.patch.vtop", 1b-.
-
-/*
- * For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
- * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
- */
-#define DO_MCKINLEY_E9_WORKAROUND
-
-#ifdef DO_MCKINLEY_E9_WORKAROUND
- .section ".data.patch.mckinley_e9", "a"
- .previous
-/* workaround for Itanium 2 Errata 9: */
-# define FSYS_RETURN \
- .xdata4 ".data.patch.mckinley_e9", 1f-.; \
-1:{ .mib; \
- nop.m 0; \
- mov r16=ar.pfs; \
- br.call.sptk.many b7=2f;; \
- }; \
-2:{ .mib; \
- nop.m 0; \
- mov ar.pfs=r16; \
- br.ret.sptk.many b6;; \
- }
-#else
-# define FSYS_RETURN br.ret.sptk.many b6
-#endif
-
-/*
- * Up until early 2004, use of .align within a function caused bad unwind info.
- * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
- * otherwise.
- */
-#ifdef HAVE_WORKING_TEXT_ALIGN
-# define TEXT_ALIGN(n) .align n
-#else
-# define TEXT_ALIGN(n)
-#endif
-
-#ifdef HAVE_SERIALIZE_DIRECTIVE
-# define dv_serialize_data .serialize.data
-# define dv_serialize_instruction .serialize.instruction
-#else
-# define dv_serialize_data
-# define dv_serialize_instruction
-#endif
-
-#endif /* _ASM_IA64_ASMMACRO_H */
diff --git a/xen/include/asm-ia64/linux/asm/bitops.h b/xen/include/asm-ia64/linux/asm/bitops.h
deleted file mode 100644
index 7232528e2d..0000000000
--- a/xen/include/asm-ia64/linux/asm/bitops.h
+++ /dev/null
@@ -1,423 +0,0 @@
-#ifndef _ASM_IA64_BITOPS_H
-#define _ASM_IA64_BITOPS_H
-
-/*
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
- * scheduler patch
- */
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/bitops.h>
-#include <asm/intrinsics.h>
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- *
- * The address must be (at least) "long" aligned.
- * Note that there are drivers (e.g., eepro100) which use these operations to operate on
- * hw-defined data-structures, so we can't easily change these operations to force a
- * bigger alignment.
- *
- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
- */
-static __inline__ void
-set_bit (int nr, volatile void *addr)
-{
- __u32 bit, old, new;
- volatile __u32 *m;
- CMPXCHG_BUGCHECK_DECL
-
- m = (volatile __u32 *) addr + (nr >> 5);
- bit = 1 << (nr & 31);
- do {
- CMPXCHG_BUGCHECK(m);
- old = *m;
- new = old | bit;
- } while (cmpxchg_acq(m, old, new) != old);
-}
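A userspace analogue of the cmpxchg_acq loop above, written with C11 atomics;
a failed compare-exchange reloads the old value, so the new mask is recomputed
on every retry exactly as in the kernel loop:

    #include <stdatomic.h>
    #include <stdint.h>

    static void set_bit_sketch(int nr, _Atomic uint32_t *words)
    {
        _Atomic uint32_t *m = words + (nr >> 5);
        uint32_t bit = 1u << (nr & 31);
        uint32_t old = atomic_load_explicit(m, memory_order_relaxed);

        while (!atomic_compare_exchange_weak_explicit(
                        m, &old, old | bit,
                        memory_order_acquire, memory_order_relaxed))
            ; /* `old` now holds the current value; retry with it */
    }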
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void
-__set_bit (int nr, volatile void *addr)
-{
- *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
-}
-
-/*
- * clear_bit() has "acquire" semantics.
- */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() do { /* skip */; } while (0)
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static __inline__ void
-clear_bit (int nr, volatile void *addr)
-{
- __u32 mask, old, new;
- volatile __u32 *m;
- CMPXCHG_BUGCHECK_DECL
-
- m = (volatile __u32 *) addr + (nr >> 5);
- mask = ~(1 << (nr & 31));
- do {
- CMPXCHG_BUGCHECK(m);
- old = *m;
- new = old & mask;
- } while (cmpxchg_acq(m, old, new) != old);
-}
-
-/**
- * __clear_bit - Clears a bit in memory (non-atomic version)
- */
-static __inline__ void
-__clear_bit (int nr, volatile void *addr)
-{
- volatile __u32 *p = (__u32 *) addr + (nr >> 5);
- __u32 m = 1 << (nr & 31);
- *p &= ~m;
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to toggle
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void
-change_bit (int nr, volatile void *addr)
-{
- __u32 bit, old, new;
- volatile __u32 *m;
- CMPXCHG_BUGCHECK_DECL
-
- m = (volatile __u32 *) addr + (nr >> 5);
- bit = (1 << (nr & 31));
- do {
- CMPXCHG_BUGCHECK(m);
- old = *m;
- new = old ^ bit;
- } while (cmpxchg_acq(m, old, new) != old);
-}
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to toggle
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void
-__change_bit (int nr, volatile void *addr)
-{
- *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int
-test_and_set_bit (int nr, volatile void *addr)
-{
- __u32 bit, old, new;
- volatile __u32 *m;
- CMPXCHG_BUGCHECK_DECL
-
- m = (volatile __u32 *) addr + (nr >> 5);
- bit = 1 << (nr & 31);
- do {
- CMPXCHG_BUGCHECK(m);
- old = *m;
- new = old | bit;
- } while (cmpxchg_acq(m, old, new) != old);
- return (old & bit) != 0;
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two instances of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
-{
- __u32 *p = (__u32 *) addr + (nr >> 5);
- __u32 m = 1 << (nr & 31);
- int oldbitset = (*p & m) != 0;
-
- *p |= m;
- return oldbitset;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int
-test_and_clear_bit (int nr, volatile void *addr)
-{
- __u32 mask, old, new;
- volatile __u32 *m;
- CMPXCHG_BUGCHECK_DECL
-
- m = (volatile __u32 *) addr + (nr >> 5);
- mask = ~(1 << (nr & 31));
- do {
- CMPXCHG_BUGCHECK(m);
- old = *m;
- new = old & mask;
- } while (cmpxchg_acq(m, old, new) != old);
- return (old & ~mask) != 0;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two instances of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
-{
- __u32 *p = (__u32 *) addr + (nr >> 5);
- __u32 m = 1 << (nr & 31);
- int oldbitset = *p & m;
-
- *p &= ~m;
- return oldbitset;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int
-test_and_change_bit (int nr, volatile void *addr)
-{
- __u32 bit, old, new;
- volatile __u32 *m;
- CMPXCHG_BUGCHECK_DECL
-
- m = (volatile __u32 *) addr + (nr >> 5);
- bit = (1 << (nr & 31));
- do {
- CMPXCHG_BUGCHECK(m);
- old = *m;
- new = old ^ bit;
- } while (cmpxchg_acq(m, old, new) != old);
- return (old & bit) != 0;
-}
-
-/*
- * WARNING: non atomic version.
- */
-static __inline__ int
-__test_and_change_bit (int nr, void *addr)
-{
- __u32 old, bit = (1 << (nr & 31));
- __u32 *m = (__u32 *) addr + (nr >> 5);
-
- old = *m;
- *m = old ^ bit;
- return (old & bit) != 0;
-}
-
-static __inline__ int
-test_bit (int nr, const volatile void *addr)
-{
- return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
-}
-
-/**
- * ffz - find the first zero bit in a long word
- * @x: The long word to find the bit in
- *
- * Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if
- * no zero exists, so code should check against ~0UL first...
- */
-static inline unsigned long
-ffz (unsigned long x)
-{
- unsigned long result;
-
- result = ia64_popcnt(x & (~x - 1));
- return result;
-}
-
-/**
- * __ffs - find first bit in word.
- * @x: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static __inline__ unsigned long
-__ffs (unsigned long x)
-{
- unsigned long result;
-
- result = ia64_popcnt((x-1) & ~x);
- return result;
-}
-
-#ifdef __KERNEL__
-
-/*
- * Return bit number of last (most-significant) bit set. Undefined
- * for x==0. Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
- */
-static inline unsigned long
-ia64_fls (unsigned long x)
-{
- long double d = x;
- long exp;
-
- exp = ia64_getf_exp(d);
- return exp - 0xffff;
-}
-
-/*
- * Find the last (most significant) bit set. Returns 0 for x==0 and
- * bits are numbered from 1..32 (e.g., fls(9) == 4).
- */
-static inline int
-fls (int t)
-{
- unsigned long x = t & 0xffffffffu;
-
- if (!x)
- return 0;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- return ia64_popcnt(x);
-}
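The same smear-then-popcount idea in portable C, with __builtin_popcount
standing in for ia64_popcnt; fls_sketch(9) == 4, matching the comment above:

    #include <stdint.h>

    static int fls_sketch(uint32_t x)
    {
        if (!x)
            return 0;
        x |= x >> 1;   /* smear the top set bit downwards... */
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;  /* ...so every bit below it is set */
        return __builtin_popcount(x);
    }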
-
-/*
- * ffs: find first bit set. This is defined the same way as the libc and compiler builtin
- * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
- * "int" values only and the result value is the bit number + 1. ffs(0) is defined to
- * return zero.
- */
-#define ffs(x) __builtin_ffs(x)
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-static __inline__ unsigned long
-hweight64 (unsigned long x)
-{
- unsigned long result;
- result = ia64_popcnt(x);
- return result;
-}
-
-#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful)
-#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
-#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
-
-#endif /* __KERNEL__ */
-
-extern int __find_next_zero_bit (const void *addr, unsigned long size,
- unsigned long offset);
-extern int __find_next_bit(const void *addr, unsigned long size,
- unsigned long offset);
-
-#define find_next_zero_bit(addr, size, offset) \
- __find_next_zero_bit((addr), (size), (offset))
-#define find_next_bit(addr, size, offset) \
- __find_next_bit((addr), (size), (offset))
-
-/*
- * The optimizer actually generates good code for this case...
- */
-#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
-
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
-
-#ifdef __KERNEL__
-
-#define __clear_bit(nr, addr) clear_bit(nr, addr)
-
-#define ext2_set_bit test_and_set_bit
-#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
-#define ext2_clear_bit test_and_clear_bit
-#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
-#define ext2_test_bit test_bit
-#define ext2_find_first_zero_bit find_first_zero_bit
-#define ext2_find_next_zero_bit find_next_zero_bit
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
-
-static inline int
-sched_find_first_bit (unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return 64 + __ffs(b[1]);
- return __ffs(b[2]) + 128;
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_IA64_BITOPS_H */
diff --git a/xen/include/asm-ia64/linux/asm/break.h b/xen/include/asm-ia64/linux/asm/break.h
deleted file mode 100644
index 8167828edc..0000000000
--- a/xen/include/asm-ia64/linux/asm/break.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _ASM_IA64_BREAK_H
-#define _ASM_IA64_BREAK_H
-
-/*
- * IA-64 Linux break numbers.
- *
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-/*
- * OS-specific debug break numbers:
- */
-#define __IA64_BREAK_KDB 0x80100
-#define __IA64_BREAK_KPROBE 0x80200
-#define __IA64_BREAK_JPROBE 0x80300
-
-/*
- * OS-specific break numbers:
- */
-#define __IA64_BREAK_SYSCALL 0x100000
-
-#endif /* _ASM_IA64_BREAK_H */
diff --git a/xen/include/asm-ia64/linux/asm/byteorder.h b/xen/include/asm-ia64/linux/asm/byteorder.h
deleted file mode 100644
index 69bd41d7c2..0000000000
--- a/xen/include/asm-ia64/linux/asm/byteorder.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _ASM_IA64_BYTEORDER_H
-#define _ASM_IA64_BYTEORDER_H
-
-/*
- * Modified 1998, 1999
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
- */
-
-#include <asm/types.h>
-#include <asm/intrinsics.h>
-#include <linux/compiler.h>
-
-static __inline__ __attribute_const__ __u64
-__ia64_swab64 (__u64 x)
-{
- __u64 result;
-
- result = ia64_mux1(x, ia64_mux1_rev);
- return result;
-}
-
-static __inline__ __attribute_const__ __u32
-__ia64_swab32 (__u32 x)
-{
- return __ia64_swab64(x) >> 32;
-}
-
-static __inline__ __attribute_const__ __u16
-__ia64_swab16(__u16 x)
-{
- return __ia64_swab64(x) >> 48;
-}
-
-#define __arch__swab64(x) __ia64_swab64(x)
-#define __arch__swab32(x) __ia64_swab32(x)
-#define __arch__swab16(x) __ia64_swab16(x)
-
-#define __BYTEORDER_HAS_U64__
-
-#include <linux/byteorder/little_endian.h>
-
-#endif /* _ASM_IA64_BYTEORDER_H */
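The derivation of the narrower swaps from the 64-bit byte reversal can be
checked in portable C, with __builtin_bswap64 standing in for the mux1
shuffle:

    #include <stdint.h>

    static uint32_t swab32_sketch(uint32_t x)
    {
        return __builtin_bswap64(x) >> 32; /* low 4 bytes land in the top half */
    }

    static uint16_t swab16_sketch(uint16_t x)
    {
        return __builtin_bswap64(x) >> 48; /* low 2 bytes land in the top 16 bits */
    }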
diff --git a/xen/include/asm-ia64/linux/asm/cacheflush.h b/xen/include/asm-ia64/linux/asm/cacheflush.h
deleted file mode 100644
index f2dacb4245..0000000000
--- a/xen/include/asm-ia64/linux/asm/cacheflush.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef _ASM_IA64_CACHEFLUSH_H
-#define _ASM_IA64_CACHEFLUSH_H
-
-/*
- * Copyright (C) 2002 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/page-flags.h>
-
-#include <asm/bitops.h>
-#include <asm/page.h>
-
-/*
- * Cache flushing routines. This is the kind of stuff that can be very expensive, so try
- * to avoid them whenever possible.
- */
-
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_icache_page(vma,page) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-#define flush_dcache_page(page) \
-do { \
- clear_bit(PG_arch_1, &(page)->flags); \
-} while (0)
-
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-extern void flush_icache_range (unsigned long start, unsigned long end);
-
-#define flush_icache_user_range(vma, page, user_addr, len) \
-do { \
- unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
- flush_icache_range(_addr, _addr + (len)); \
-} while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-#endif /* _ASM_IA64_CACHEFLUSH_H */
diff --git a/xen/include/asm-ia64/linux/asm/checksum.h b/xen/include/asm-ia64/linux/asm/checksum.h
deleted file mode 100644
index 1f230ff8ea..0000000000
--- a/xen/include/asm-ia64/linux/asm/checksum.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef _ASM_IA64_CHECKSUM_H
-#define _ASM_IA64_CHECKSUM_H
-
-/*
- * Modified 1998, 1999
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries.
- */
-extern unsigned short ip_fast_csum (unsigned char * iph, unsigned int ihl);
-
-/*
- * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit
- * checksum, already complemented
- */
-extern unsigned short int csum_tcpudp_magic (unsigned long saddr,
- unsigned long daddr,
- unsigned short len,
- unsigned short proto,
- unsigned int sum);
-
-extern unsigned int csum_tcpudp_nofold (unsigned long saddr,
- unsigned long daddr,
- unsigned short len,
- unsigned short proto,
- unsigned int sum);
-
-/*
- * Computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-extern unsigned int csum_partial (const unsigned char * buff, int len,
- unsigned int sum);
-
-/*
- * Same as csum_partial, but copies from src while it checksums.
- *
- * Here it is even more important to align src and dst on a 32-bit (or
- * even better 64-bit) boundary.
- */
-extern unsigned int csum_partial_copy_from_user (const char *src, char *dst,
- int len, unsigned int sum,
- int *errp);
-
-extern unsigned int csum_partial_copy_nocheck (const char *src, char *dst,
- int len, unsigned int sum);
-
-/*
- * This routine is used for miscellaneous IP-like checksums, mainly in
- * icmp.c
- */
-extern unsigned short ip_compute_csum (unsigned char *buff, int len);
-
-/*
- * Fold a partial checksum without adding pseudo headers.
- */
-static inline unsigned short
-csum_fold (unsigned int sum)
-{
- sum = (sum & 0xffff) + (sum >> 16);
- sum = (sum & 0xffff) + (sum >> 16);
- return ~sum;
-}
-
-#endif /* _ASM_IA64_CHECKSUM_H */
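A worked userspace copy of the csum_fold() logic above: two fold passes
suffice because the first can carry at most one extra bit into bit 16:

    #include <stdint.h>

    static uint16_t csum_fold_sketch(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16); /* fold high half into low half */
        sum = (sum & 0xffff) + (sum >> 16); /* absorb the possible carry */
        return (uint16_t)~sum;              /* return the complemented result */
    }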
diff --git a/xen/include/asm-ia64/linux/asm/current.h b/xen/include/asm-ia64/linux/asm/current.h
deleted file mode 100644
index c659f90fbf..0000000000
--- a/xen/include/asm-ia64/linux/asm/current.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_IA64_CURRENT_H
-#define _ASM_IA64_CURRENT_H
-
-/*
- * Modified 1998-2000
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-
-#include <asm/intrinsics.h>
-
-/*
- * In kernel mode, thread pointer (r13) is used to point to the current task
- * structure.
- */
-#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
-
-#endif /* _ASM_IA64_CURRENT_H */
diff --git a/xen/include/asm-ia64/linux/asm/delay.h b/xen/include/asm-ia64/linux/asm/delay.h
deleted file mode 100644
index 57182d6f2b..0000000000
--- a/xen/include/asm-ia64/linux/asm/delay.h
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef _ASM_IA64_DELAY_H
-#define _ASM_IA64_DELAY_H
-
-/*
- * Delay routines using a pre-computed "cycles/usec" value.
- *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/compiler.h>
-
-#include <asm/intrinsics.h>
-#include <asm/processor.h>
-
-static __inline__ void
-ia64_set_itm (unsigned long val)
-{
- ia64_setreg(_IA64_REG_CR_ITM, val);
- ia64_srlz_d();
-}
-
-static __inline__ unsigned long
-ia64_get_itm (void)
-{
- unsigned long result;
-
- result = ia64_getreg(_IA64_REG_CR_ITM);
- ia64_srlz_d();
- return result;
-}
-
-static __inline__ void
-ia64_set_itv (unsigned long val)
-{
- ia64_setreg(_IA64_REG_CR_ITV, val);
- ia64_srlz_d();
-}
-
-static __inline__ unsigned long
-ia64_get_itv (void)
-{
- return ia64_getreg(_IA64_REG_CR_ITV);
-}
-
-static __inline__ void
-ia64_set_itc (unsigned long val)
-{
- ia64_setreg(_IA64_REG_AR_ITC, val);
- ia64_srlz_d();
-}
-
-static __inline__ unsigned long
-ia64_get_itc (void)
-{
- unsigned long result;
-
- result = ia64_getreg(_IA64_REG_AR_ITC);
- ia64_barrier();
-#ifdef CONFIG_ITANIUM
- while (unlikely((__s32) result == -1)) {
- result = ia64_getreg(_IA64_REG_AR_ITC);
- ia64_barrier();
- }
-#endif
- return result;
-}
-
-extern void ia64_delay_loop (unsigned long loops);
-
-static __inline__ void
-__delay (unsigned long loops)
-{
- if (unlikely(loops < 1))
- return;
-
- ia64_delay_loop (loops - 1);
-}
-
-static __inline__ void
-udelay (unsigned long usecs)
-{
- unsigned long start = ia64_get_itc();
- unsigned long cycles = usecs*local_cpu_data->cyc_per_usec;
-
- while (ia64_get_itc() - start < cycles)
- cpu_relax();
-}
-
-#endif /* _ASM_IA64_DELAY_H */
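A userspace analogue of the udelay() busy-wait above, spinning on a monotonic
clock instead of the ITC cycle counter (a sketch, not suitable for precise
timing):

    #include <time.h>

    static void udelay_sketch(unsigned long usecs)
    {
        struct timespec t0, t;

        clock_gettime(CLOCK_MONOTONIC, &t0);
        do {
            clock_gettime(CLOCK_MONOTONIC, &t); /* stands in for ia64_get_itc() */
        } while ((t.tv_sec - t0.tv_sec) * 1000000L +
                 (t.tv_nsec - t0.tv_nsec) / 1000L < (long)usecs);
    }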
diff --git a/xen/include/asm-ia64/linux/asm/div64.h b/xen/include/asm-ia64/linux/asm/div64.h
deleted file mode 100644
index 6cd978cefb..0000000000
--- a/xen/include/asm-ia64/linux/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/xen/include/asm-ia64/linux/asm/dma.h b/xen/include/asm-ia64/linux/asm/dma.h
deleted file mode 100644
index 3be1b4925e..0000000000
--- a/xen/include/asm-ia64/linux/asm/dma.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _ASM_IA64_DMA_H
-#define _ASM_IA64_DMA_H
-
-/*
- * Copyright (C) 1998-2002 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/config.h>
-
-#include <asm/io.h> /* need byte IO */
-
-extern unsigned long MAX_DMA_ADDRESS;
-
-#ifdef CONFIG_PCI
- extern int isa_dma_bridge_buggy;
-#else
-# define isa_dma_bridge_buggy (0)
-#endif
-
-#define free_dma(x)
-
-#endif /* _ASM_IA64_DMA_H */
diff --git a/xen/include/asm-ia64/linux/asm/fpswa.h b/xen/include/asm-ia64/linux/asm/fpswa.h
deleted file mode 100644
index 62edfceada..0000000000
--- a/xen/include/asm-ia64/linux/asm/fpswa.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef _ASM_IA64_FPSWA_H
-#define _ASM_IA64_FPSWA_H
-
-/*
- * Floating-point Software Assist
- *
- * Copyright (C) 1999 Intel Corporation.
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
- */
-
-typedef struct {
- /* 4 * 128 bits */
- unsigned long fp_lp[4*2];
-} fp_state_low_preserved_t;
-
-typedef struct {
- /* 10 * 128 bits */
- unsigned long fp_lv[10 * 2];
-} fp_state_low_volatile_t;
-
-typedef struct {
- /* 16 * 128 bits */
- unsigned long fp_hp[16 * 2];
-} fp_state_high_preserved_t;
-
-typedef struct {
- /* 96 * 128 bits */
- unsigned long fp_hv[96 * 2];
-} fp_state_high_volatile_t;
-
-/**
- * floating point state to be passed to the FP emulation library by
- * the trap/fault handler
- */
-typedef struct {
- unsigned long bitmask_low64;
- unsigned long bitmask_high64;
- fp_state_low_preserved_t *fp_state_low_preserved;
- fp_state_low_volatile_t *fp_state_low_volatile;
- fp_state_high_preserved_t *fp_state_high_preserved;
- fp_state_high_volatile_t *fp_state_high_volatile;
-} fp_state_t;
-
-typedef struct {
- unsigned long status;
- unsigned long err0;
- unsigned long err1;
- unsigned long err2;
-} fpswa_ret_t;
-
-/**
- * function header for the Floating Point software assist
- * library. This function is invoked by the Floating point software
- * assist trap/fault handler.
- */
-typedef fpswa_ret_t (*efi_fpswa_t) (unsigned long trap_type, void *bundle, unsigned long *ipsr,
- unsigned long *fsr, unsigned long *isr, unsigned long *preds,
- unsigned long *ifs, fp_state_t *fp_state);
-
-/**
- * This is the FPSWA library interface as defined by EFI. We need to pass a
- * pointer to the interface itself on a call to the assist library
- */
-typedef struct {
- unsigned int revision;
- unsigned int reserved;
- efi_fpswa_t fpswa;
-} fpswa_interface_t;
-
-extern fpswa_interface_t *fpswa_interface;
-
-#endif /* _ASM_IA64_FPSWA_H */
diff --git a/xen/include/asm-ia64/linux/asm/fpu.h b/xen/include/asm-ia64/linux/asm/fpu.h
deleted file mode 100644
index 3859558ff0..0000000000
--- a/xen/include/asm-ia64/linux/asm/fpu.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef _ASM_IA64_FPU_H
-#define _ASM_IA64_FPU_H
-
-/*
- * Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <asm/types.h>
-
-/* floating point status register: */
-#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
-#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */
-#define FPSR_TRAP_ZD (1 << 2) /* zero-divide trap disabled */
-#define FPSR_TRAP_OD (1 << 3) /* overflow trap disabled */
-#define FPSR_TRAP_UD (1 << 4) /* underflow trap disabled */
-#define FPSR_TRAP_ID (1 << 5) /* inexact trap disabled */
-#define FPSR_S0(x) ((x) << 6)
-#define FPSR_S1(x) ((x) << 19)
-#define FPSR_S2(x) (__IA64_UL(x) << 32)
-#define FPSR_S3(x) (__IA64_UL(x) << 45)
-
-/* floating-point status field controls: */
-#define FPSF_FTZ (1 << 0) /* flush-to-zero */
-#define FPSF_WRE (1 << 1) /* widest-range exponent */
-#define FPSF_PC(x) (((x) & 0x3) << 2) /* precision control */
-#define FPSF_RC(x) (((x) & 0x3) << 4) /* rounding control */
-#define FPSF_TD (1 << 6) /* trap disabled */
-
-/* floating-point status field flags: */
-#define FPSF_V (1 << 7) /* invalid operation flag */
-#define FPSF_D (1 << 8) /* denormal/unnormal operand flag */
-#define FPSF_Z (1 << 9) /* zero divide (IEEE) flag */
-#define FPSF_O (1 << 10) /* overflow (IEEE) flag */
-#define FPSF_U (1 << 11) /* underflow (IEEE) flag */
-#define FPSF_I (1 << 12) /* inexact (IEEE) flag) */
-
-/* floating-point rounding control: */
-#define FPRC_NEAREST 0x0
-#define FPRC_NEGINF 0x1
-#define FPRC_POSINF 0x2
-#define FPRC_TRUNC 0x3
-
-#define FPSF_DEFAULT (FPSF_PC (0x3) | FPSF_RC (FPRC_NEAREST))
-
-/* This default value is the same as HP-UX uses. Don't change it
- without a very good reason. */
-#define FPSR_DEFAULT (FPSR_TRAP_VD | FPSR_TRAP_DD | FPSR_TRAP_ZD \
- | FPSR_TRAP_OD | FPSR_TRAP_UD | FPSR_TRAP_ID \
- | FPSR_S0 (FPSF_DEFAULT) \
- | FPSR_S1 (FPSF_DEFAULT | FPSF_TD | FPSF_WRE) \
- | FPSR_S2 (FPSF_DEFAULT | FPSF_TD) \
- | FPSR_S3 (FPSF_DEFAULT | FPSF_TD))
-
-# ifndef __ASSEMBLY__
-
-struct ia64_fpreg {
- union {
- unsigned long bits[2];
- long double __dummy; /* force 16-byte alignment */
- } u;
-};
-
-# endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IA64_FPU_H */
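The FPSR macros above compose per-status-field settings into one 64-bit value. A small sketch, assuming the fpu.h definitions are in scope (the __IA64_UL() stand-in below mimics the cast the real <asm/types.h> provides):

#ifndef __IA64_UL
#define __IA64_UL(x) ((unsigned long)(x))  /* stand-in for <asm/types.h> */
#endif

static unsigned long fpsr_with_s1_trunc(void)
{
    unsigned long fpsr = FPSR_DEFAULT;       /* HP-UX-compatible default */

    fpsr &= ~FPSR_S1(FPSF_RC(0x3));          /* clear s1's rounding-control bits */
    fpsr |= FPSR_S1(FPSF_RC(FPRC_TRUNC));    /* select round-toward-zero for s1 */
    return fpsr;
}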
diff --git a/xen/include/asm-ia64/linux/asm/hdreg.h b/xen/include/asm-ia64/linux/asm/hdreg.h
deleted file mode 100644
index 83b5161d26..0000000000
--- a/xen/include/asm-ia64/linux/asm/hdreg.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * linux/include/asm-ia64/hdreg.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-#warning this file is obsolete, please do not use it
-
-#ifndef __ASM_IA64_HDREG_H
-#define __ASM_IA64_HDREG_H
-
-typedef unsigned short ide_ioreg_t;
-
-#endif /* __ASM_IA64_HDREG_H */
diff --git a/xen/include/asm-ia64/linux/asm/intrinsics.h b/xen/include/asm-ia64/linux/asm/intrinsics.h
deleted file mode 100644
index 8089f955e5..0000000000
--- a/xen/include/asm-ia64/linux/asm/intrinsics.h
+++ /dev/null
@@ -1,181 +0,0 @@
-#ifndef _ASM_IA64_INTRINSICS_H
-#define _ASM_IA64_INTRINSICS_H
-
-/*
- * Compiler-dependent intrinsics.
- *
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#ifndef __ASSEMBLY__
-#include <linux/config.h>
-
-/* include compiler specific intrinsics */
-#include <asm/ia64regs.h>
-#ifdef __INTEL_COMPILER
-# include <asm/intel_intrin.h>
-#else
-# include <asm/gcc_intrin.h>
-#endif
-
-/*
- * Force an unresolved reference if someone tries to use
- * ia64_fetch_and_add() with a bad value.
- */
-extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
-extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
-
-#define IA64_FETCHADD(tmp,v,n,sz,sem) \
-({ \
- switch (sz) { \
- case 4: \
- tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \
- break; \
- \
- case 8: \
- tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \
- break; \
- \
- default: \
- __bad_size_for_ia64_fetch_and_add(); \
- } \
-})
-
-#define ia64_fetchadd(i,v,sem) \
-({ \
- __u64 _tmp; \
- volatile __typeof__(*(v)) *_v = (v); \
- /* Can't use a switch () here: gcc isn't always smart enough for that... */ \
- if ((i) == -16) \
- IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem); \
- else if ((i) == -8) \
- IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem); \
- else if ((i) == -4) \
- IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem); \
- else if ((i) == -1) \
- IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem); \
- else if ((i) == 1) \
- IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem); \
- else if ((i) == 4) \
- IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem); \
- else if ((i) == 8) \
- IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem); \
- else if ((i) == 16) \
- IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem); \
- else \
- _tmp = __bad_increment_for_ia64_fetch_and_add(); \
- (__typeof__(*(v))) (_tmp); /* return old value */ \
-})
-
-#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
-
-/*
- * This function doesn't exist, so you'll get a linker error if
- * something tries to do an invalid xchg().
- */
-extern void ia64_xchg_called_with_bad_pointer (void);
-
-#define __xchg(x,ptr,size) \
-({ \
- unsigned long __xchg_result; \
- \
- switch (size) { \
- case 1: \
- __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
- break; \
- \
- case 2: \
- __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
- break; \
- \
- case 4: \
- __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
- break; \
- \
- case 8: \
- __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
- break; \
- default: \
- ia64_xchg_called_with_bad_pointer(); \
- } \
- __xchg_result; \
-})
-
-#define xchg(ptr,x) \
- ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern long ia64_cmpxchg_called_with_bad_pointer (void);
-
-#define ia64_cmpxchg(sem,ptr,old,new,size) \
-({ \
- __u64 _o_, _r_; \
- \
- switch (size) { \
- case 1: _o_ = (__u8 ) (long) (old); break; \
- case 2: _o_ = (__u16) (long) (old); break; \
- case 4: _o_ = (__u32) (long) (old); break; \
- case 8: _o_ = (__u64) (long) (old); break; \
- default: break; \
- } \
- switch (size) { \
- case 1: \
- _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
- break; \
- \
- case 2: \
- _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
- break; \
- \
- case 4: \
- _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
- break; \
- \
- case 8: \
- _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
- break; \
- \
- default: \
- _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
- break; \
- } \
- (__typeof__(old)) _r_; \
-})
-
-#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
-#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
-
-/* for compatibility with other platforms: */
-#define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n)
-
-#ifdef CONFIG_IA64_DEBUG_CMPXCHG
-# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
-# define CMPXCHG_BUGCHECK(v) \
- do { \
- if (_cmpxchg_bugcheck_count-- <= 0) { \
- void *ip; \
- extern int printk(const char *fmt, ...); \
- ip = (void *) ia64_getreg(_IA64_REG_IP); \
- printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
- break; \
- } \
- } while (0)
-#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
-# define CMPXCHG_BUGCHECK_DECL
-# define CMPXCHG_BUGCHECK(v)
-#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
-
-#endif
-#endif /* _ASM_IA64_INTRINSICS_H */
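A typical use of cmpxchg_acq() above is a bounded atomic increment. The sketch below (ia64-only, since the macro bottoms out in compiler intrinsics) claims the next slot index unless a limit has been reached; try_get_slot is a hypothetical helper, not part of the header:

static int try_get_slot(unsigned int *counter, unsigned int limit)
{
    unsigned int old, new;

    do {
        old = *counter;
        if (old >= limit)
            return -1;                /* no slot available */
        new = old + 1;
        /* cmpxchg_acq() returns the value observed in memory; success
         * means it still equaled 'old' when the store took effect. */
    } while (cmpxchg_acq(counter, old, new) != old);

    return (int)old;                  /* index of the slot we claimed */
}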
diff --git a/xen/include/asm-ia64/linux/asm/ioctl.h b/xen/include/asm-ia64/linux/asm/ioctl.h
deleted file mode 100644
index be9cc2403d..0000000000
--- a/xen/include/asm-ia64/linux/asm/ioctl.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _ASM_IA64_IOCTL_H
-#define _ASM_IA64_IOCTL_H
-
-/*
- * Based on <asm-i386/ioctl.h>.
- *
- * Modified 1998, 1999
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and for avoiding overwrites of user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB - 1!
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The ia64 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _ASM_IA64_IOCTL_H */
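As a worked example of the encoding, the hypothetical read-only command below (an invented 'X' magic, command number 0x01, and a two-field argument struct) round-trips cleanly through the decode macros:

struct my_ioctl_arg {
    unsigned long offset;
    unsigned long length;
};

#define MYDEV_GETRANGE _IOR('X', 0x01, struct my_ioctl_arg)

/* Decoding MYDEV_GETRANGE with the macros above yields:
 *   _IOC_DIR(MYDEV_GETRANGE)  == _IOC_READ
 *   _IOC_TYPE(MYDEV_GETRANGE) == 'X'
 *   _IOC_NR(MYDEV_GETRANGE)   == 0x01
 *   _IOC_SIZE(MYDEV_GETRANGE) == sizeof(struct my_ioctl_arg)
 */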
diff --git a/xen/include/asm-ia64/linux/asm/linkage.h b/xen/include/asm-ia64/linux/asm/linkage.h
deleted file mode 100644
index 14cd72cd80..0000000000
--- a/xen/include/asm-ia64/linux/asm/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_LINKAGE_H
-#define __ASM_LINKAGE_H
-
-#define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))
-
-#endif
diff --git a/xen/include/asm-ia64/linux/asm/machvec_hpsim.h b/xen/include/asm-ia64/linux/asm/machvec_hpsim.h
deleted file mode 100644
index cf72fc87fd..0000000000
--- a/xen/include/asm-ia64/linux/asm/machvec_hpsim.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _ASM_IA64_MACHVEC_HPSIM_h
-#define _ASM_IA64_MACHVEC_HPSIM_h
-
-extern ia64_mv_setup_t hpsim_setup;
-extern ia64_mv_irq_init_t hpsim_irq_init;
-
-/*
- * This stuff has dual use!
- *
- * For a generic kernel, the macros are used to initialize the
- * platform's machvec structure. When compiling a non-generic kernel,
- * the macros are used directly.
- */
-#define platform_name "hpsim"
-#define platform_setup hpsim_setup
-#define platform_irq_init hpsim_irq_init
-
-#endif /* _ASM_IA64_MACHVEC_HPSIM_h */
diff --git a/xen/include/asm-ia64/linux/asm/machvec_init.h b/xen/include/asm-ia64/linux/asm/machvec_init.h
deleted file mode 100644
index 2d36f6840f..0000000000
--- a/xen/include/asm-ia64/linux/asm/machvec_init.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#include <asm/machvec.h>
-
-extern ia64_mv_send_ipi_t ia64_send_ipi;
-extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
-extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
-extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
-extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
-extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write;
-
-extern ia64_mv_inb_t __ia64_inb;
-extern ia64_mv_inw_t __ia64_inw;
-extern ia64_mv_inl_t __ia64_inl;
-extern ia64_mv_outb_t __ia64_outb;
-extern ia64_mv_outw_t __ia64_outw;
-extern ia64_mv_outl_t __ia64_outl;
-extern ia64_mv_mmiowb_t __ia64_mmiowb;
-extern ia64_mv_readb_t __ia64_readb;
-extern ia64_mv_readw_t __ia64_readw;
-extern ia64_mv_readl_t __ia64_readl;
-extern ia64_mv_readq_t __ia64_readq;
-extern ia64_mv_readb_t __ia64_readb_relaxed;
-extern ia64_mv_readw_t __ia64_readw_relaxed;
-extern ia64_mv_readl_t __ia64_readl_relaxed;
-extern ia64_mv_readq_t __ia64_readq_relaxed;
-
-#define MACHVEC_HELPER(name) \
- struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
- = MACHVEC_INIT(name);
-
-#define MACHVEC_DEFINE(name) MACHVEC_HELPER(name)
-
-MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
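For reference, a sketch of what that last line expands to when a non-generic kernel builds with MACHVEC_PLATFORM_NAME defined as hpsim (MACHVEC_INIT comes from <asm/machvec.h> and is not shown here):

/* MACHVEC_DEFINE(hpsim) -> MACHVEC_HELPER(hpsim) -> */
struct ia64_machine_vector machvec_hpsim
    __attribute__ ((unused, __section__ (".machvec")))
    = MACHVEC_INIT(hpsim);  /* initializer macro from <asm/machvec.h> */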
diff --git a/xen/include/asm-ia64/linux/asm/mca.h b/xen/include/asm-ia64/linux/asm/mca.h
deleted file mode 100644
index 149ad01184..0000000000
--- a/xen/include/asm-ia64/linux/asm/mca.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * File: mca.h
- * Purpose: Machine check handling specific defines
- *
- * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
- * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
- * Copyright (C) Russ Anderson (rja@sgi.com)
- */
-
-#ifndef _ASM_IA64_MCA_H
-#define _ASM_IA64_MCA_H
-
-#define IA64_MCA_STACK_SIZE 8192
-
-#if !defined(__ASSEMBLY__)
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-
-#include <asm/param.h>
-#include <asm/sal.h>
-#include <asm/processor.h>
-#include <asm/mca_asm.h>
-
-#define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */
-
-typedef struct ia64_fptr {
- unsigned long fp;
- unsigned long gp;
-} ia64_fptr_t;
-
-typedef union cmcv_reg_u {
- u64 cmcv_regval;
- struct {
- u64 cmcr_vector : 8;
- u64 cmcr_reserved1 : 4;
- u64 cmcr_ignored1 : 1;
- u64 cmcr_reserved2 : 3;
- u64 cmcr_mask : 1;
- u64 cmcr_ignored2 : 47;
- } cmcv_reg_s;
-
-} cmcv_reg_t;
-
-#define cmcv_mask cmcv_reg_s.cmcr_mask
-#define cmcv_vector cmcv_reg_s.cmcr_vector
-
-enum {
- IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0,
- IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1
-};
-
-/* Information maintained by the MC infrastructure */
-typedef struct ia64_mc_info_s {
- u64 imi_mca_handler;
- size_t imi_mca_handler_size;
- u64 imi_monarch_init_handler;
- size_t imi_monarch_init_handler_size;
- u64 imi_slave_init_handler;
- size_t imi_slave_init_handler_size;
- u8 imi_rendez_checkin[NR_CPUS];
-
-} ia64_mc_info_t;
-
-typedef struct ia64_mca_sal_to_os_state_s {
- u64 imsto_os_gp; /* GP of the os registered with the SAL */
- u64 imsto_pal_proc; /* PAL_PROC entry point - physical addr */
- u64 imsto_sal_proc; /* SAL_PROC entry point - physical addr */
- u64 imsto_sal_gp; /* GP of the SAL - physical */
- u64 imsto_rendez_state; /* Rendez state information */
- u64 imsto_sal_check_ra; /* Return address in SAL_CHECK while going
- * back to SAL from OS after MCA handling.
- */
- u64 pal_min_state; /* from PAL in r17 */
- u64 proc_state_param; /* from PAL in r18. See SDV 2:268 11.3.2.1 */
-} ia64_mca_sal_to_os_state_t;
-
-enum {
- IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */
- IA64_MCA_WARM_BOOT = -1, /* Warm boot of the system needed from SAL */
- IA64_MCA_COLD_BOOT = -2, /* Cold boot of the system needed from SAL */
- IA64_MCA_HALT = -3 /* System to be halted by SAL */
-};
-
-enum {
- IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */
- IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */
-};
-
-typedef struct ia64_mca_os_to_sal_state_s {
- u64 imots_os_status; /* OS status to SAL as to what happened
- * with the MCA handling.
- */
- u64 imots_sal_gp; /* GP of the SAL - physical */
- u64 imots_context; /* 0 if return to same context
- 1 if return to new context */
- u64 *imots_new_min_state; /* Pointer to structure containing
- * new values of registers in the min state
- * save area.
- */
- u64 imots_sal_check_ra; /* Return address in SAL_CHECK while going
- * back to SAL from OS after MCA handling.
- */
-} ia64_mca_os_to_sal_state_t;
-
-/* Per-CPU MCA state that is too big for normal per-CPU variables. */
-
-struct ia64_mca_cpu {
- u64 stack[IA64_MCA_STACK_SIZE/8]; /* MCA memory-stack */
- u64 proc_state_dump[512];
- u64 stackframe[32];
- u64 rbstore[IA64_MCA_STACK_SIZE/8]; /* MCA reg.-backing store */
- u64 init_stack[KERNEL_STACK_SIZE/8];
-} __attribute__ ((aligned(16)));
-
-/* Array of physical addresses of each CPU's MCA area. */
-extern unsigned long __per_cpu_mca[NR_CPUS];
-
-extern void ia64_mca_init(void);
-extern void ia64_mca_cpu_init(void *);
-extern void ia64_os_mca_dispatch(void);
-extern void ia64_os_mca_dispatch_end(void);
-extern void ia64_mca_ucmc_handler(void);
-extern void ia64_monarch_init_handler(void);
-extern void ia64_slave_init_handler(void);
-extern void ia64_mca_cmc_vector_setup(void);
-extern int ia64_reg_MCA_extension(void*);
-extern void ia64_unreg_MCA_extension(void);
-
-#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_IA64_MCA_H */
diff --git a/xen/include/asm-ia64/linux/asm/nodedata.h b/xen/include/asm-ia64/linux/asm/nodedata.h
deleted file mode 100644
index 9978c7ce75..0000000000
--- a/xen/include/asm-ia64/linux/asm/nodedata.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved.
- * Copyright (c) 2002 NEC Corp.
- * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
- * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
- */
-#ifndef _ASM_IA64_NODEDATA_H
-#define _ASM_IA64_NODEDATA_H
-
-#include <linux/config.h>
-#include <linux/numa.h>
-
-#include <asm/percpu.h>
-#include <asm/mmzone.h>
-
-#ifdef CONFIG_NUMA
-
-/*
- * Node Data. One of these structures is located on each node of a NUMA system.
- */
-
-struct pglist_data;
-struct ia64_node_data {
- short active_cpu_count;
- short node;
- struct pglist_data *pg_data_ptrs[MAX_NUMNODES];
-};
-
-
-/*
- * Return a pointer to the node_data structure for the executing cpu.
- */
-#define local_node_data (local_cpu_data->node_data)
-
-/*
- * Given a node id, return a pointer to the pg_data_t for the node.
- *
- * NODE_DATA - should be used in all code not related to system
- * initialization. It uses pernode data structures to minimize
- * offnode memory references. However, these structure are not
- * present during boot. This macro can be used once cpu_init
- * completes.
- */
-#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid])
-
-#endif /* CONFIG_NUMA */
-
-#endif /* _ASM_IA64_NODEDATA_H */
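A minimal sketch of the intended access pattern under CONFIG_NUMA, assuming the usual pg_data_t layout from <linux/mmzone.h> (node_spanned_pages is assumed here, not defined in this header):

static unsigned long local_node_pages_sketch(void)
{
    /* local_node_data->node is this cpu's node id; NODE_DATA() then
     * resolves it through the per-node pointer cache kept above. */
    struct pglist_data *pgdat = NODE_DATA(local_node_data->node);

    return pgdat->node_spanned_pages;
}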
diff --git a/xen/include/asm-ia64/linux/asm/numnodes.h b/xen/include/asm-ia64/linux/asm/numnodes.h
deleted file mode 100644
index 21cff4da54..0000000000
--- a/xen/include/asm-ia64/linux/asm/numnodes.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_MAX_NUMNODES_H
-#define _ASM_MAX_NUMNODES_H
-
-#ifdef CONFIG_IA64_DIG
-/* Max 8 Nodes */
-#define NODES_SHIFT 3
-#elif defined(CONFIG_IA64_HP_ZX1) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
-/* Max 32 Nodes */
-#define NODES_SHIFT 5
-#elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
-/* Max 256 Nodes */
-#define NODES_SHIFT 8
-#endif
-
-#endif /* _ASM_MAX_NUMNODES_H */
diff --git a/xen/include/asm-ia64/linux/asm/param.h b/xen/include/asm-ia64/linux/asm/param.h
deleted file mode 100644
index 5e1e0d2d7b..0000000000
--- a/xen/include/asm-ia64/linux/asm/param.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _ASM_IA64_PARAM_H
-#define _ASM_IA64_PARAM_H
-
-/*
- * Fundamental kernel parameters.
- *
- * Based on <asm-i386/param.h>.
- *
- * Modified 1998, 1999, 2002-2003
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-
-#define EXEC_PAGESIZE 65536
-
-#ifndef NOGROUP
-# define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#ifdef __KERNEL__
-# include <linux/config.h> /* mustn't include <linux/config.h> outside of #ifdef __KERNEL__ */
-# ifdef CONFIG_IA64_HP_SIM
- /*
- * Yeah, simulating stuff is slow, so let us catch some breath between
- * timer interrupts...
- */
-# define HZ 32
-# else
-# define HZ CONFIG_HZ
-# endif
-# define USER_HZ HZ
-# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
-#else
- /*
- * Technically, this is wrong, but some old apps still refer to it. The proper way to
- * get the HZ value is via sysconf(_SC_CLK_TCK).
- */
-# define HZ 1024
-#endif
-
-#endif /* _ASM_IA64_PARAM_H */
diff --git a/xen/include/asm-ia64/linux/asm/patch.h b/xen/include/asm-ia64/linux/asm/patch.h
deleted file mode 100644
index 4797f3535e..0000000000
--- a/xen/include/asm-ia64/linux/asm/patch.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _ASM_IA64_PATCH_H
-#define _ASM_IA64_PATCH_H
-
-/*
- * Copyright (C) 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * There are a number of reasons for patching instructions. Rather than duplicating code
- * all over the place, we put the common stuff here. Reasons for patching: in-kernel
- * module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
- * shared library. Undoubtedly, some of these reasons will disappear and others will
- * be added over time.
- */
-#include <linux/elf.h>
-#include <linux/types.h>
-
-extern void ia64_patch (u64 insn_addr, u64 mask, u64 val); /* patch any insn slot */
-extern void ia64_patch_imm64 (u64 insn_addr, u64 val); /* patch "movl" w/abs. value */
-extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel value */
-
-extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
-extern void ia64_patch_vtop (unsigned long start, unsigned long end);
-extern void ia64_patch_gate (void);
-
-#endif /* _ASM_IA64_PATCH_H */
diff --git a/xen/include/asm-ia64/linux/asm/rse.h b/xen/include/asm-ia64/linux/asm/rse.h
deleted file mode 100644
index 02830a3b01..0000000000
--- a/xen/include/asm-ia64/linux/asm/rse.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef _ASM_IA64_RSE_H
-#define _ASM_IA64_RSE_H
-
-/*
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Register stack engine related helper functions. This file may be
- * used in applications, so be careful about the name-space and give
- * some consideration to non-GNU C compilers (though __inline__ is
- * fine).
- */
-
-static __inline__ unsigned long
-ia64_rse_slot_num (unsigned long *addr)
-{
- return (((unsigned long) addr) >> 3) & 0x3f;
-}
-
-/*
- * Return TRUE if ADDR is the address of an RNAT slot.
- */
-static __inline__ unsigned long
-ia64_rse_is_rnat_slot (unsigned long *addr)
-{
- return ia64_rse_slot_num(addr) == 0x3f;
-}
-
-/*
- * Returns the address of the RNAT slot that covers the slot at
- * address SLOT_ADDR.
- */
-static __inline__ unsigned long *
-ia64_rse_rnat_addr (unsigned long *slot_addr)
-{
- return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
-}
-
-/*
- * Calculate the number of registers in the dirty partition starting at BSPSTORE and
- * ending at BSP. This isn't simply (BSP-BSPSTORE)/8 because every 64th slot stores
- * ar.rnat.
- */
-static __inline__ unsigned long
-ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
-{
- unsigned long slots = (bsp - bspstore);
-
- return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40;
-}
-
-/*
- * The inverse of the above: given bspstore and the number of
- * registers, calculate ar.bsp.
- */
-static __inline__ unsigned long *
-ia64_rse_skip_regs (unsigned long *addr, long num_regs)
-{
- long delta = ia64_rse_slot_num(addr) + num_regs;
-
- if (num_regs < 0)
- delta -= 0x3e;
- return addr + num_regs + delta/0x3f;
-}
-
-#endif /* _ASM_IA64_RSE_H */
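A worked example of the RNAT-skipping arithmetic: with bspstore at slot 0x3d, five dirty registers occupy slots 0x3d and 0x3e, skip the RNAT slot at 0x3f, and continue through slots 0x00-0x02 of the next group, so ar.bsp lands six slots ahead. Since this header is usable from applications, the check below can run in user space; the pointer value is fabricated for illustration and never dereferenced:

#include <assert.h>

static void rse_example(void)
{
    unsigned long *bspstore = (unsigned long *)(0x3dUL << 3);  /* slot 0x3d */
    unsigned long *bsp = ia64_rse_skip_regs(bspstore, 5);

    assert(bsp == bspstore + 6);                    /* one RNAT slot skipped */
    assert(ia64_rse_num_regs(bspstore, bsp) == 5);  /* the inverse agrees */
}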
diff --git a/xen/include/asm-ia64/linux/asm/setup.h b/xen/include/asm-ia64/linux/asm/setup.h
deleted file mode 100644
index ea29b57aff..0000000000
--- a/xen/include/asm-ia64/linux/asm/setup.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __IA64_SETUP_H
-#define __IA64_SETUP_H
-
-#define COMMAND_LINE_SIZE 512
-
-#endif
diff --git a/xen/include/asm-ia64/linux/asm/sn/README.origin b/xen/include/asm-ia64/linux/asm/sn/README.origin
deleted file mode 100644
index 495171ee81..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/README.origin
+++ /dev/null
@@ -1,24 +0,0 @@
-# Source files in this directory are identical copies of linux-2.6.19 files:
-#
-# NOTE: DO NOT commit changes to these files! If a file
-# needs to be changed, move it to ../linux-xen and follow
-# the instructions in the README there.
-
-geo.h -> linux/include/asm-ia64/sn/geo.h
-klconfig.h -> linux/include/asm-ia64/sn/klconfig.h
-l1.h -> linux/include/asm-ia64/sn/l1.h
-leds.h -> linux/include/asm-ia64/sn/leds.h
-module.h -> linux/include/asm-ia64/sn/module.h
-pcibus_provider_defs.h -> linux/include/asm-ia64/sn/pcibus_provider_defs.h
-pda.h -> linux/include/asm-ia64/sn/pda.h
-pic.h -> linux/include/asm-ia64/sn/pic.h
-shub_mmr.h -> linux/include/asm-ia64/sn/shub_mmr.h
-shubio.h -> linux/include/asm-ia64/sn/shubio.h
-simulator.h -> linux/include/asm-ia64/sn/simulator.h
-sn_cpuid.h -> linux/include/asm-ia64/sn/sn_cpuid.h
-sn_feature_sets.h -> linux/include/asm-ia64/sn/sn_feature_sets.h
-tiocp.h -> linux/include/asm-ia64/sn/tiocp.h
-xbow.h -> linux/arch/ia64/sn/include/xtalk/xbow.h
-xwidgetdev.h -> linux/arch/ia64/sn/include/xtalk/xwidgetdev.h
-# from 2.6.20
-sn_sal.h -> linux/include/asm-ia64/sn/sn_sal.h
diff --git a/xen/include/asm-ia64/linux/asm/sn/geo.h b/xen/include/asm-ia64/linux/asm/sn/geo.h
deleted file mode 100644
index f083c94340..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/geo.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_GEO_H
-#define _ASM_IA64_SN_GEO_H
-
-/* The geoid_t implementation below is based loosely on the pcfg_t
- implementation in sys/SN/promcfg.h. */
-
-/* Type declarations */
-
-/* Size of a geoid_t structure (must be before decl. of geoid_u) */
-#define GEOID_SIZE 8 /* Would 16 be better? The size can
- be different on different platforms. */
-
-#define MAX_SLOTS 0xf /* slots per module */
-#define MAX_SLABS 0xf /* slabs per slot */
-
-typedef unsigned char geo_type_t;
-
-/* Fields common to all substructures */
-typedef struct geo_common_s {
- moduleid_t module; /* The module (box) this h/w lives in */
- geo_type_t type; /* What type of h/w is named by this geoid_t */
- slabid_t slab:4; /* slab (ASIC), 0 .. 15 within slot */
- slotid_t slot:4; /* slot (Blade), 0 .. 15 within module */
-} geo_common_t;
-
-/* Additional fields for particular types of hardware */
-typedef struct geo_node_s {
- geo_common_t common; /* No additional fields needed */
-} geo_node_t;
-
-typedef struct geo_rtr_s {
- geo_common_t common; /* No additional fields needed */
-} geo_rtr_t;
-
-typedef struct geo_iocntl_s {
- geo_common_t common; /* No additional fields needed */
-} geo_iocntl_t;
-
-typedef struct geo_pcicard_s {
- geo_iocntl_t common;
- char bus; /* Bus/widget number */
- char slot; /* PCI slot number */
-} geo_pcicard_t;
-
-/* Subcomponents of a node */
-typedef struct geo_cpu_s {
- geo_node_t node;
- char slice; /* Which CPU on the node */
-} geo_cpu_t;
-
-typedef struct geo_mem_s {
- geo_node_t node;
- char membus; /* The memory bus on the node */
- char memslot; /* The memory slot on the bus */
-} geo_mem_t;
-
-
-typedef union geoid_u {
- geo_common_t common;
- geo_node_t node;
- geo_iocntl_t iocntl;
- geo_pcicard_t pcicard;
- geo_rtr_t rtr;
- geo_cpu_t cpu;
- geo_mem_t mem;
- char padsize[GEOID_SIZE];
-} geoid_t;
-
-
-/* Preprocessor macros */
-
-#define GEO_MAX_LEN 48 /* max. formatted length, plus some pad:
- module/001c07/slab/5/node/memory/2/slot/4 */
-
-/* Values for geo_type_t */
-#define GEO_TYPE_INVALID 0
-#define GEO_TYPE_MODULE 1
-#define GEO_TYPE_NODE 2
-#define GEO_TYPE_RTR 3
-#define GEO_TYPE_IOCNTL 4
-#define GEO_TYPE_IOCARD 5
-#define GEO_TYPE_CPU 6
-#define GEO_TYPE_MEM 7
-#define GEO_TYPE_MAX (GEO_TYPE_MEM+1)
-
-/* Parameter for hwcfg_format_geoid_compt() */
-#define GEO_COMPT_MODULE 1
-#define GEO_COMPT_SLAB 2
-#define GEO_COMPT_IOBUS 3
-#define GEO_COMPT_IOSLOT 4
-#define GEO_COMPT_CPU 5
-#define GEO_COMPT_MEMBUS 6
-#define GEO_COMPT_MEMSLOT 7
-
-#define GEO_INVALID_STR "<invalid>"
-
-#define INVALID_NASID ((nasid_t)-1)
-#define INVALID_CNODEID ((cnodeid_t)-1)
-#define INVALID_PNODEID ((pnodeid_t)-1)
-#define INVALID_SLAB (slabid_t)-1
-#define INVALID_SLOT (slotid_t)-1
-#define INVALID_MODULE ((moduleid_t)-1)
-
-static inline slabid_t geo_slab(geoid_t g)
-{
- return (g.common.type == GEO_TYPE_INVALID) ?
- INVALID_SLAB : g.common.slab;
-}
-
-static inline slotid_t geo_slot(geoid_t g)
-{
- return (g.common.type == GEO_TYPE_INVALID) ?
- INVALID_SLOT : g.common.slot;
-}
-
-static inline moduleid_t geo_module(geoid_t g)
-{
- return (g.common.type == GEO_TYPE_INVALID) ?
- INVALID_MODULE : g.common.module;
-}
-
-extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
-
-#endif /* _ASM_IA64_SN_GEO_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/klconfig.h b/xen/include/asm-ia64/linux/asm/sn/klconfig.h
deleted file mode 100644
index bcbf209d63..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/klconfig.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Derived from IRIX <sys/SN/klconfig.h>.
- *
- * Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (C) 1999 by Ralf Baechle
- */
-#ifndef _ASM_IA64_SN_KLCONFIG_H
-#define _ASM_IA64_SN_KLCONFIG_H
-
-/*
- * The KLCONFIG structures store info about the various BOARDs found
- * during Hardware Discovery. In addition, it stores info about the
- * components found on the BOARDs.
- */
-
-typedef s32 klconf_off_t;
-
-
-/* Functions/macros needed to use this structure */
-
-typedef struct kl_config_hdr {
- char pad[20];
- klconf_off_t ch_board_info; /* the link list of boards */
- char pad0[88];
-} kl_config_hdr_t;
-
-
-#define NODE_OFFSET_TO_LBOARD(nasid,off) (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off)))
-
-/*
- * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
- * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to
- * the LOCAL/current NODE. REMOTE means it is attached to a different
- * node. (TBD - Need a way to treat ROUTER boards.)
- *
- * There are 2 different structures to represent these boards -
- * lboard - local board, rboard - remote board. These 2 structures
- * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer to
- * the figure below.) The first byte of the rboard or lboard structure
- * is used to find out its type - no unions are used.
- * If it is an lboard, then the config info of this board will be found
- * on the local node. (LOCAL NODE BASE + offset value gives a pointer to
- * the structure.)
- * If it is an rboard, the local structure contains the node number
- * and the offset of the beginning of the LINKED LIST on the remote node.
- * The details of the hardware on a remote node can be built locally,
- * if required, by reading the LINKED LIST on the remote node and
- * ignoring all the rboards on that node.
- *
- * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the
- * First board info on the remote node. The remote node list is
- * traversed as the local list, using the REMOTE BASE ADDRESS and not
- * the local base address and ignoring all rboard values.
- *
- *
- KLCONFIG
-
- +------------+ +------------+ +------------+ +------------+
- | lboard | +-->| lboard | +-->| rboard | +-->| lboard |
- +------------+ | +------------+ | +------------+ | +------------+
- | board info | | | board info | | |errinfo,bptr| | | board info |
- +------------+ | +------------+ | +------------+ | +------------+
- | offset |--+ | offset |--+ | offset |--+ |offset=NULL |
- +------------+ +------------+ +------------+ +------------+
-
-
- +------------+
- | board info |
- +------------+ +--------------------------------+
- | compt 1 |------>| type, rev, diaginfo, size ... | (CPU)
- +------------+ +--------------------------------+
- | compt 2 |--+
- +------------+ | +--------------------------------+
- | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
- +------------+ +--------------------------------+
- | errinfo |--+
- +------------+ | +--------------------------------+
- +--->|r/l brd errinfo,compt err flags |
- +--------------------------------+
-
- *
- * Each BOARD consists of COMPONENTs and the BOARD structure has
- * pointers (offsets) to its COMPONENT structure.
- * The COMPONENT structure has version info, size and speed info, revision,
- * error info and the NIC info. This structure can accommodate any
- * BOARD with arbitrary COMPONENT composition.
- *
- * The ERRORINFO part of each BOARD has error information
- * that describes errors about the BOARD itself. It also has flags to
- * indicate the COMPONENT(s) on the board that have errors. The error
- * information specific to the COMPONENT is present in the respective
- * COMPONENT structure.
- *
- * The ERRORINFO structure is also treated like a COMPONENT, i.e. the
- * BOARD has pointers(offset) to the ERRORINFO structure. The rboard
- * structure also has a pointer to the ERRORINFO structure. This is
- * the place to store ERRORINFO about a REMOTE NODE, if the HUB on
- * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where
- * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
- * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info
- * which is present on the REMOTE NODE.(TBD)
- * REMOTE ERRINFO can be stored on any of the nearest nodes
- * or on all the nearest nodes.(TBD)
- * Like BOARD structures, REMOTE ERRINFO structures can be built locally
- * using the rboard errinfo pointer.
- *
- * In order to get useful information from this data organization, a set of
- * interface routines is provided (TBD). The important thing to remember while
- * manipulating the structures is that the NODE number information should
- * be used. If the NODE is non-zero (remote), then each offset should
- * be added to the REMOTE BASE ADDR; otherwise it should be added to the LOCAL BASE ADDR.
- * This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
- *
- * Note that these structures do not provide much info about connectivity.
- * That info will be part of HWGRAPH, which is an extension of the cfg_t
- * data structure. (ref IP27prom/cfg.h) It has to be extended to include
- * the IO part of the Network(TBD).
- *
- * The data structures below define the above concepts.
- */
-
-
-/*
- * BOARD classes
- */
-
-#define KLCLASS_MASK 0xf0
-#define KLCLASS_NONE 0x00
-#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
-#define KLCLASS_CPU KLCLASS_NODE
-#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
- and the non-graphics widget boards */
-#define KLCLASS_ROUTER 0x30 /* Router board */
-#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
- so that we can record error info */
-#define KLCLASS_IOBRICK 0x70 /* IP35 iobrick */
-#define KLCLASS_MAX 8 /* Bump this if a new CLASS is added */
-
-#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
-
-
-/*
- * board types
- */
-
-#define KLTYPE_MASK 0x0f
-#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
-
-#define KLTYPE_SNIA (KLCLASS_CPU | 0x1)
-#define KLTYPE_TIO (KLCLASS_CPU | 0x2)
-
-#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
-#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
-#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
-
-#define KLTYPE_IOBRICK_XBOW (KLCLASS_MIDPLANE | 0x2)
-
-#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0)
-#define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4)
-#define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6)
-#define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7)
-#define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8)
-#define KLTYPE_OPUSBRICK (KLCLASS_IOBRICK | 0x9)
-#define KLTYPE_SABRICK (KLCLASS_IOBRICK | 0xa)
-#define KLTYPE_IABRICK (KLCLASS_IOBRICK | 0xb)
-#define KLTYPE_PABRICK (KLCLASS_IOBRICK | 0xc)
-#define KLTYPE_GABRICK (KLCLASS_IOBRICK | 0xd)
-
-
-/*
- * board structures
- */
-
-#define MAX_COMPTS_PER_BRD 24
-
-typedef struct lboard_s {
- klconf_off_t brd_next_any; /* Next BOARD */
- unsigned char struct_type; /* type of structure, local or remote */
- unsigned char brd_type; /* type+class */
- unsigned char brd_sversion; /* version of this structure */
- unsigned char brd_brevision; /* board revision */
- unsigned char brd_promver; /* board prom version, if any */
- unsigned char brd_flags; /* Enabled, Disabled etc */
- unsigned char brd_slot; /* slot number */
- unsigned short brd_debugsw; /* Debug switches */
- geoid_t brd_geoid; /* geo id */
- partid_t brd_partition; /* Partition number */
- unsigned short brd_diagval; /* diagnostic value */
- unsigned short brd_diagparm; /* diagnostic parameter */
- unsigned char brd_inventory; /* inventory history */
- unsigned char brd_numcompts; /* Number of components */
- nic_t brd_nic; /* Number in CAN */
- nasid_t brd_nasid; /* passed parameter */
- klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
- klconf_off_t brd_errinfo; /* Board's error information */
- struct lboard_s *brd_parent; /* Logical parent for this brd */
- char pad0[4];
- unsigned char brd_confidence; /* confidence that the board is bad */
- nasid_t brd_owner; /* who owns this board */
- unsigned char brd_nic_flags; /* To handle 8 more NICs */
- char pad1[24]; /* future expansion */
- char brd_name[32];
- nasid_t brd_next_same_host; /* host of next brd w/same nasid */
- klconf_off_t brd_next_same; /* Next BOARD with same nasid */
-} lboard_t;
-
-/*
- * Generic info structure. This stores common info about a
- * component.
- */
-
-typedef struct klinfo_s { /* Generic info */
- unsigned char struct_type; /* type of this structure */
- unsigned char struct_version; /* version of this structure */
- unsigned char flags; /* Enabled, disabled etc */
- unsigned char revision; /* component revision */
- unsigned short diagval; /* result of diagnostics */
- unsigned short diagparm; /* diagnostic parameter */
- unsigned char inventory; /* previous inventory status */
- unsigned short partid; /* widget part number */
- nic_t nic; /* Must be aligned properly */
- unsigned char physid; /* physical id of component */
- unsigned int virtid; /* virtual id as seen by system */
- unsigned char widid; /* Widget id - if applicable */
- nasid_t nasid; /* node number - from parent */
- char pad1; /* pad out structure. */
- char pad2; /* pad out structure. */
- void *data;
- klconf_off_t errinfo; /* component specific errors */
- unsigned short pad3; /* pci fields have moved over to */
- unsigned short pad4; /* klbri_t */
-} klinfo_t ;
-
-
-static inline lboard_t *find_lboard_next(lboard_t * brd)
-{
- if (brd && brd->brd_next_any)
- return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any);
- return NULL;
-}
-
-#endif /* _ASM_IA64_SN_KLCONFIG_H */
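A sketch of walking the board list with find_lboard_next(); find_board_of_type is a hypothetical helper returning the first board whose KLTYPE matches the requested one:

static lboard_t *find_board_of_type(lboard_t *first, unsigned char type)
{
    lboard_t *brd;

    for (brd = first; brd != NULL; brd = find_lboard_next(brd))
        if (KLTYPE(brd->brd_type) == KLTYPE(type))
            return brd;
    return NULL;  /* ran off the end of the LINKED LIST */
}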
diff --git a/xen/include/asm-ia64/linux/asm/sn/l1.h b/xen/include/asm-ia64/linux/asm/sn/l1.h
deleted file mode 100644
index 344bf44bb3..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/l1.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-#ifndef _ASM_IA64_SN_L1_H
-#define _ASM_IA64_SN_L1_H
-
-/* brick type response codes */
-#define L1_BRICKTYPE_PX 0x23 /* # */
-#define L1_BRICKTYPE_PE 0x25 /* % */
-#define L1_BRICKTYPE_N_p0 0x26 /* & */
-#define L1_BRICKTYPE_IP45 0x34 /* 4 */
-#define L1_BRICKTYPE_IP41 0x35 /* 5 */
-#define L1_BRICKTYPE_TWISTER 0x36 /* 6 */ /* IP53 & ROUTER */
-#define L1_BRICKTYPE_IX 0x3d /* = */
-#define L1_BRICKTYPE_IP34 0x61 /* a */
-#define L1_BRICKTYPE_GA 0x62 /* b */
-#define L1_BRICKTYPE_C 0x63 /* c */
-#define L1_BRICKTYPE_OPUS_TIO 0x66 /* f */
-#define L1_BRICKTYPE_I 0x69 /* i */
-#define L1_BRICKTYPE_N 0x6e /* n */
-#define L1_BRICKTYPE_OPUS 0x6f /* o */
-#define L1_BRICKTYPE_P 0x70 /* p */
-#define L1_BRICKTYPE_R 0x72 /* r */
-#define L1_BRICKTYPE_CHI_CG 0x76 /* v */
-#define L1_BRICKTYPE_X 0x78 /* x */
-#define L1_BRICKTYPE_X2 0x79 /* y */
-#define L1_BRICKTYPE_SA 0x5e /* ^ */
-#define L1_BRICKTYPE_PA 0x6a /* j */
-#define L1_BRICKTYPE_IA 0x6b /* k */
-#define L1_BRICKTYPE_ATHENA 0x2b /* + */
-#define L1_BRICKTYPE_DAYTONA 0x7a /* z */
-#define L1_BRICKTYPE_1932 0x2c /* . */
-#define L1_BRICKTYPE_191010 0x2e /* , */
-
-/* board type response codes */
-#define L1_BOARDTYPE_IP69 0x0100 /* CA */
-#define L1_BOARDTYPE_IP63 0x0200 /* CB */
-#define L1_BOARDTYPE_BASEIO 0x0300 /* IB */
-#define L1_BOARDTYPE_PCIE2SLOT 0x0400 /* IC */
-#define L1_BOARDTYPE_PCIX3SLOT 0x0500 /* ID */
-#define L1_BOARDTYPE_PCIXPCIE4SLOT 0x0600 /* IE */
-#define L1_BOARDTYPE_ABACUS 0x0700 /* AB */
-#define L1_BOARDTYPE_DAYTONA 0x0800 /* AD */
-#define L1_BOARDTYPE_INVAL (-1) /* invalid brick type */
-
-#endif /* _ASM_IA64_SN_L1_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/leds.h b/xen/include/asm-ia64/linux/asm/sn/leds.h
deleted file mode 100644
index 66cf8c4d92..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/leds.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_LEDS_H
-#define _ASM_IA64_SN_LEDS_H
-
-#include <asm/sn/addrs.h>
-#include <asm/sn/pda.h>
-#include <asm/sn/shub_mmr.h>
-
-#define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
-#define LED_CPU_SHIFT 16
-
-#define LED_CPU_HEARTBEAT 0x01
-#define LED_CPU_ACTIVITY 0x02
-#define LED_ALWAYS_SET 0x00
-
-/*
- * Basic macros for flashing the LEDS on an SGI SN.
- */
-
-static __inline__ void
-set_led_bits(u8 value, u8 mask)
-{
- pda->led_state = (pda->led_state & ~mask) | (value & mask);
- *pda->led_address = (short) pda->led_state;
-}
-
-#endif /* _ASM_IA64_SN_LEDS_H */
-
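Usage sketch: a periodic tick handler could blink the heartbeat LED through set_led_bits(). The static phase variable here is a stand-in for the pda->hb_state/hb_count bookkeeping the real heartbeat code keeps:

static void led_heartbeat_tick(void)
{
    static u8 beat;                         /* local phase, toggles each tick */

    beat ^= LED_CPU_HEARTBEAT;
    set_led_bits(beat, LED_CPU_HEARTBEAT);  /* touch only the heartbeat bit */
}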
diff --git a/xen/include/asm-ia64/linux/asm/sn/module.h b/xen/include/asm-ia64/linux/asm/sn/module.h
deleted file mode 100644
index 734e980ece..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/module.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_MODULE_H
-#define _ASM_IA64_SN_MODULE_H
-
-/* parameter for format_module_id() */
-#define MODULE_FORMAT_BRIEF 1
-#define MODULE_FORMAT_LONG 2
-#define MODULE_FORMAT_LCD 3
-
-/*
- * Module id format
- *
- * 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int)
- * 15-8 Brick type (8-bit ascii character)
- * 7-0 Bay (brick position in rack (0-63) - 8-bit unsigned int)
- *
- */
-
-/*
- * Macros for getting the brick type
- */
-#define MODULE_BTYPE_MASK 0xff00
-#define MODULE_BTYPE_SHFT 8
-#define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
-#define MODULE_BT_TO_CHAR(_b) ((char)(_b))
-#define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
-
-/*
- * Macros for getting the rack ID.
- */
-#define MODULE_RACK_MASK 0xffff0000
-#define MODULE_RACK_SHFT 16
-#define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
-
-/*
- * Macros for getting the brick position
- */
-#define MODULE_BPOS_MASK 0x00ff
-#define MODULE_BPOS_SHFT 0
-#define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
-
-/*
- * Macros for encoding and decoding rack IDs
- * A rack number consists of three parts:
- * class (0==CPU/mixed, 1==I/O), group, number
- *
- * Rack number is stored just as it is displayed on the screen:
- * a 3-decimal-digit number.
- */
-#define RACK_CLASS_DVDR 100
-#define RACK_GROUP_DVDR 10
-#define RACK_NUM_DVDR 1
-
-#define RACK_CREATE_RACKID(_c, _g, _n) ((_c) * RACK_CLASS_DVDR + \
- (_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR)
-
-#define RACK_GET_CLASS(_r) ((_r) / RACK_CLASS_DVDR)
-#define RACK_GET_GROUP(_r) (((_r) - RACK_GET_CLASS(_r) * \
- RACK_CLASS_DVDR) / RACK_GROUP_DVDR)
-#define RACK_GET_NUM(_r) (((_r) - RACK_GET_CLASS(_r) * \
- RACK_CLASS_DVDR - RACK_GET_GROUP(_r) * \
- RACK_GROUP_DVDR) / RACK_NUM_DVDR)
-
-/*
- * Macros for encoding and decoding rack IDs
- * A rack number consists of three parts:
- * class 1 bit, 0==CPU/mixed, 1==I/O
- * group 2 bits for CPU/mixed, 3 bits for I/O
- * number 3 bits for CPU/mixed, 2 bits for I/O (1 based)
- */
-#define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2)
-#define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 2 : 3)
-
-#define RACK_CLASS_MASK(_r) 0x20
-#define RACK_CLASS_SHFT(_r) 5
-#define RACK_ADD_CLASS(_r, _c) \
- ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
-
-#define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r)
-#define RACK_GROUP_MASK(_r) \
- ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
-#define RACK_ADD_GROUP(_r, _g) \
- ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
-
-#define RACK_NUM_SHFT(_r) 0
-#define RACK_NUM_MASK(_r) \
- ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
-#define RACK_ADD_NUM(_r, _n) \
- ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
-
-
-/*
- * Brick type definitions
- */
-#define MAX_BRICK_TYPES 256 /* brick type is stored as uchar */
-
-extern char brick_types[];
-
-#define MODULE_CBRICK 0
-#define MODULE_RBRICK 1
-#define MODULE_IBRICK 2
-#define MODULE_KBRICK 3
-#define MODULE_XBRICK 4
-#define MODULE_DBRICK 5
-#define MODULE_PBRICK 6
-#define MODULE_NBRICK 7
-#define MODULE_PEBRICK 8
-#define MODULE_PXBRICK 9
-#define MODULE_IXBRICK 10
-#define MODULE_CGBRICK 11
-#define MODULE_OPUSBRICK 12
-#define MODULE_SABRICK 13 /* TIO BringUp Brick */
-#define MODULE_IABRICK 14
-#define MODULE_PABRICK 15
-#define MODULE_GABRICK 16
-#define MODULE_OPUS_TIO 17 /* OPUS TIO Riser */
-
-extern char brick_types[];
-extern void format_module_id(char *, moduleid_t, int);
-
-#endif /* _ASM_IA64_SN_MODULE_H */
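A worked example of the decimal rack-ID scheme above: rack 123 encodes class 1 (I/O), group 2, number 3, and the accessor macros recover each part:

#include <assert.h>

static void rackid_example(void)
{
    int r = RACK_CREATE_RACKID(1, 2, 3);  /* 1*100 + 2*10 + 3*1 = 123 */

    assert(r == 123);
    assert(RACK_GET_CLASS(r) == 1);       /* 123 / 100 */
    assert(RACK_GET_GROUP(r) == 2);       /* (123 - 100) / 10 */
    assert(RACK_GET_NUM(r) == 3);         /* 123 - 100 - 20 */
}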
diff --git a/xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h b/xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h
deleted file mode 100644
index 8f7c83d0f6..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
-#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
-
-/*
- * SN pci asic types. Do not ever renumber these or reuse values. The
- * values must agree with what prom thinks they are.
- */
-
-#define PCIIO_ASIC_TYPE_UNKNOWN 0
-#define PCIIO_ASIC_TYPE_PPB 1
-#define PCIIO_ASIC_TYPE_PIC 2
-#define PCIIO_ASIC_TYPE_TIOCP 3
-#define PCIIO_ASIC_TYPE_TIOCA 4
-#define PCIIO_ASIC_TYPE_TIOCE 5
-
-#define PCIIO_ASIC_MAX_TYPES 6
-
-/*
- * Common pciio bus provider data. There should be one of these as the
- * first field in any pciio based provider soft structure (e.g. pcibr_soft
- * tioca_soft, etc).
- */
-
-struct pcibus_bussoft {
- u32 bs_asic_type; /* chipset type */
- u32 bs_xid; /* xwidget id */
- u32 bs_persist_busnum; /* Persistent Bus Number */
- u32 bs_persist_segment; /* Segment Number */
- u64 bs_legacy_io; /* legacy io pio addr */
- u64 bs_legacy_mem; /* legacy mem pio addr */
- u64 bs_base; /* widget base */
- struct xwidget_info *bs_xwidget_info;
-};
-
-struct pci_controller;
-/*
- * SN pci bus indirection
- */
-
-struct sn_pcibus_provider {
- dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
- dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
- void (*dma_unmap)(struct pci_dev *, dma_addr_t, int);
- void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
- void (*force_interrupt)(struct sn_irq_info *);
- void (*target_interrupt)(struct sn_irq_info *);
-};
-
-/*
- * Flags used by the map interfaces
- * bits 3:0 specifies format of passed in address
- * bit 4 specifies that address is to be used for MSI
- */
-
-#define SN_DMA_ADDRTYPE(x) ((x) & 0xf)
-#define SN_DMA_ADDR_PHYS 1 /* address is phys memory */
-#define SN_DMA_ADDR_XIO 2 /* address is an xio address */
-#define SN_DMA_MSI 0x10 /* Bus address is to be used for MSI */
-
-extern struct sn_pcibus_provider *sn_pci_provider[];
-#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
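Dispatch through the provider table looks roughly like the sketch below. SN_PCIDEV_BUSSOFT() lives in the companion pcidev.h header and is assumed here, as is the convention of returning 0 on failure:

static dma_addr_t sn_dma_map_sketch(struct pci_dev *dev,
                                    unsigned long phys, size_t size)
{
    struct pcibus_bussoft *bs = SN_PCIDEV_BUSSOFT(dev);
    struct sn_pcibus_provider *p = sn_pci_provider[bs->bs_asic_type];

    if (!p || !p->dma_map)
        return 0;  /* no provider registered for this ASIC type */
    return p->dma_map(dev, phys, size, SN_DMA_ADDR_PHYS);
}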
diff --git a/xen/include/asm-ia64/linux/asm/sn/pda.h b/xen/include/asm-ia64/linux/asm/sn/pda.h
deleted file mode 100644
index 1c5108d44d..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/pda.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PDA_H
-#define _ASM_IA64_SN_PDA_H
-
-#include <linux/cache.h>
-#include <asm/percpu.h>
-#include <asm/system.h>
-
-
-/*
- * CPU-specific data structure.
- *
- * One of these structures is allocated for each cpu of a NUMA system.
- *
- * This structure provides a convenient way of keeping together
- * all SN per-cpu data structures.
- */
-
-typedef struct pda_s {
-
- /*
- * Support for SN LEDs
- */
- volatile short *led_address;
- u8 led_state;
- u8 hb_state; /* supports blinking heartbeat leds */
- unsigned int hb_count;
-
- unsigned int idle_flag;
-
- volatile unsigned long *bedrock_rev_id;
- volatile unsigned long *pio_write_status_addr;
- unsigned long pio_write_status_val;
- volatile unsigned long *pio_shub_war_cam_addr;
-
- unsigned long sn_in_service_ivecs[4];
- int sn_lb_int_war_ticks;
- int sn_last_irq;
- int sn_first_irq;
-} pda_t;
-
-
-#define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
-
-/*
- * PDA
- * Per-cpu private data area for each cpu. The PDA is located immediately after
- * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each
- * cpu but only a small amount of the page is actually used. We put the SNIA PDA
- * in the same page as the cpu_data area. Note that there is a check in the setup
- * code to verify that we don't overflow the page.
- *
- * It seems like we should cache-line align the pda so that any changes in the
- * size of the cpu_data area don't change the cache layout. Should we align to a
- * 32, 64, 128 or 512-byte boundary? Each has merits. For now, pick 128; this
- * should be revisited later.
- */
-DECLARE_PER_CPU(struct pda_s, pda_percpu);
-
-#define pda (&__ia64_per_cpu_var(pda_percpu))
-
-#define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
-
-#endif /* _ASM_IA64_SN_PDA_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/pic.h b/xen/include/asm-ia64/linux/asm/sn/pic.h
deleted file mode 100644
index 5f9da5fd6e..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/pic.h
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PCI_PIC_H
-#define _ASM_IA64_SN_PCI_PIC_H
-
-/*
- * PIC AS DEVICE ZERO
- * ------------------
- *
- * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC)
- * be designated as 'device 0'. That is a departure from earlier SGI
- * PCI bridges. Because of that we use config space 1 to access the
- * config space of the first actual PCI device on the bus.
- * Here's what the PIC manual says:
- *
- * The current PCI-X bus specification now defines that the parent
- * host's bus bridge (PIC for example) must be device 0 on bus 0. PIC
- * reduced the total number of devices from 8 to 4 and removed the
- * device registers and windows, now only supporting devices 0, 1, 2, and
- * 3. PIC did leave all 8 configuration space windows. The reason was
- * there was nothing to gain by removing them. Herein lies the problem.
- * The device numbering we do using 0 through 3 is unrelated to the device
- * numbering which PCI-X requires in configuration space. In the past we
- * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc.
- * PCI-X requires we start at 1, not 0, and currently the PX brick
- * does associate our:
- *
- * device 0 with configuration space window 1,
- * device 1 with configuration space window 2,
- * device 2 with configuration space window 3,
- * device 3 with configuration space window 4.
- *
- * The net effect is that all config space accesses are off-by-one
- * relative to other per-slot accesses on the PIC.
- * Here is a table that shows some of that:
- *
- * Internal Slot#
- * |
- * | 0 1 2 3
- * ----------|---------------------------------------
- * config | 0x21000 0x22000 0x23000 0x24000
- * |
- * even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd
- * |
- * odd rrb | n/a 0[1] n/a 1[1]
- * |
- * int dev | 00 01 10 11
- * |
- * ext slot# | 1 2 3 4
- * ----------|---------------------------------------
- */
-
-#define PIC_ATE_TARGETID_SHFT 8
-#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL
-#define PIC_PCI64_ATTR_TARG_SHFT 60
-
-
-/*****************************************************************************
- *********************** PIC MMR structure mapping ***************************
- *****************************************************************************/
-
-/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0]
- * of a 64-bit register. When writing PIC registers, always write the
- * entire 64 bits.
- */
-
-struct pic {
-
- /* 0x000000-0x00FFFF -- Local Registers */
-
- /* 0x000000-0x000057 -- Standard Widget Configuration */
- u64 p_wid_id; /* 0x000000 */
- u64 p_wid_stat; /* 0x000008 */
- u64 p_wid_err_upper; /* 0x000010 */
- u64 p_wid_err_lower; /* 0x000018 */
- #define p_wid_err p_wid_err_lower
- u64 p_wid_control; /* 0x000020 */
- u64 p_wid_req_timeout; /* 0x000028 */
- u64 p_wid_int_upper; /* 0x000030 */
- u64 p_wid_int_lower; /* 0x000038 */
- #define p_wid_int p_wid_int_lower
- u64 p_wid_err_cmdword; /* 0x000040 */
- u64 p_wid_llp; /* 0x000048 */
- u64 p_wid_tflush; /* 0x000050 */
-
- /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
- u64 p_wid_aux_err; /* 0x000058 */
- u64 p_wid_resp_upper; /* 0x000060 */
- u64 p_wid_resp_lower; /* 0x000068 */
- #define p_wid_resp p_wid_resp_lower
- u64 p_wid_tst_pin_ctrl; /* 0x000070 */
- u64 p_wid_addr_lkerr; /* 0x000078 */
-
- /* 0x000080-0x00008F -- PMU & MAP */
- u64 p_dir_map; /* 0x000080 */
- u64 _pad_000088; /* 0x000088 */
-
- /* 0x000090-0x00009F -- SSRAM */
- u64 p_map_fault; /* 0x000090 */
- u64 _pad_000098; /* 0x000098 */
-
- /* 0x0000A0-0x0000AF -- Arbitration */
- u64 p_arb; /* 0x0000A0 */
- u64 _pad_0000A8; /* 0x0000A8 */
-
- /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
- u64 p_ate_parity_err; /* 0x0000B0 */
- u64 _pad_0000B8; /* 0x0000B8 */
-
- /* 0x0000C0-0x0000FF -- PCI/GIO */
- u64 p_bus_timeout; /* 0x0000C0 */
- u64 p_pci_cfg; /* 0x0000C8 */
- u64 p_pci_err_upper; /* 0x0000D0 */
- u64 p_pci_err_lower; /* 0x0000D8 */
- #define p_pci_err p_pci_err_lower
- u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */
-
- /* 0x000100-0x0001FF -- Interrupt */
- u64 p_int_status; /* 0x000100 */
- u64 p_int_enable; /* 0x000108 */
- u64 p_int_rst_stat; /* 0x000110 */
- u64 p_int_mode; /* 0x000118 */
- u64 p_int_device; /* 0x000120 */
- u64 p_int_host_err; /* 0x000128 */
- u64 p_int_addr[8]; /* 0x0001{30,,,68} */
- u64 p_err_int_view; /* 0x000170 */
- u64 p_mult_int; /* 0x000178 */
- u64 p_force_always[8]; /* 0x0001{80,,,B8} */
- u64 p_force_pin[8]; /* 0x0001{C0,,,F8} */
-
- /* 0x000200-0x000298 -- Device */
- u64 p_device[4]; /* 0x0002{00,,,18} */
- u64 _pad_000220[4]; /* 0x0002{20,,,38} */
- u64 p_wr_req_buf[4]; /* 0x0002{40,,,58} */
- u64 _pad_000260[4]; /* 0x0002{60,,,78} */
- u64 p_rrb_map[2]; /* 0x0002{80,,,88} */
- #define p_even_resp p_rrb_map[0] /* 0x000280 */
- #define p_odd_resp p_rrb_map[1] /* 0x000288 */
- u64 p_resp_status; /* 0x000290 */
- u64 p_resp_clear; /* 0x000298 */
-
- u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */
-
- /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
- struct {
- u64 upper; /* 0x0003{00,,,F0} */
- u64 lower; /* 0x0003{08,,,F8} */
- } p_buf_addr_match[16];
-
- /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
- struct {
- u64 flush_w_touch; /* 0x000{400,,,5C0} */
- u64 flush_wo_touch; /* 0x000{408,,,5C8} */
- u64 inflight; /* 0x000{410,,,5D0} */
- u64 prefetch; /* 0x000{418,,,5D8} */
- u64 total_pci_retry; /* 0x000{420,,,5E0} */
- u64 max_pci_retry; /* 0x000{428,,,5E8} */
- u64 max_latency; /* 0x000{430,,,5F0} */
- u64 clear_all; /* 0x000{438,,,5F8} */
- } p_buf_count[8];
-
-
- /* 0x000600-0x0009FF -- PCI/X registers */
- u64 p_pcix_bus_err_addr; /* 0x000600 */
- u64 p_pcix_bus_err_attr; /* 0x000608 */
- u64 p_pcix_bus_err_data; /* 0x000610 */
- u64 p_pcix_pio_split_addr; /* 0x000618 */
- u64 p_pcix_pio_split_attr; /* 0x000620 */
- u64 p_pcix_dma_req_err_attr; /* 0x000628 */
- u64 p_pcix_dma_req_err_addr; /* 0x000630 */
- u64 p_pcix_timeout; /* 0x000638 */
-
- u64 _pad_000640[120]; /* 0x000{640,,,9F8} */
-
- /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
- struct {
- u64 p_buf_addr; /* 0x000{A00,,,AF0} */
- u64 p_buf_attr; /* 0X000{A08,,,AF8} */
- } p_pcix_read_buf_64[16];
-
- struct {
- u64 p_buf_addr; /* 0x000{B00,,,BE0} */
- u64 p_buf_attr; /* 0x000{B08,,,BE8} */
- u64 p_buf_valid; /* 0x000{B10,,,BF0} */
- u64 __pad1; /* 0x000{B18,,,BF8} */
- } p_pcix_write_buf_64[8];
-
- /* End of Local Registers -- Start of Address Map space */
-
- char _pad_000c00[0x010000 - 0x000c00];
-
- /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
- u64 p_int_ate_ram[1024]; /* 0x010000-0x011fff */
-
- /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
- u64 p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */
-
- char _pad_014000[0x18000 - 0x014000];
-
- /* 0x18000-0x197F8 -- PIC Write Request Ram */
- u64 p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
- u64 p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
- u64 p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
-
- char _pad_019800[0x20000 - 0x019800];
-
- /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
- union {
- u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
- u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
- u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
- u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
- union {
- u8 c[0x100 / 1];
- u16 s[0x100 / 2];
- u32 l[0x100 / 4];
- u64 d[0x100 / 8];
- } f[8];
- } p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */
-
- /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
- union {
- u8 c[0x1000 / 1]; /* 0x028000-0x029000 */
- u16 s[0x1000 / 2]; /* 0x028000-0x029000 */
- u32 l[0x1000 / 4]; /* 0x028000-0x029000 */
- u64 d[0x1000 / 8]; /* 0x028000-0x029000 */
- union {
- u8 c[0x100 / 1];
- u16 s[0x100 / 2];
- u32 l[0x100 / 4];
- u64 d[0x100 / 8];
- } f[8];
- } p_type1_cfg; /* 0x028000-0x029000 */
-
- char _pad_029000[0x030000-0x029000];
-
- /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
- union {
- u8 c[8 / 1];
- u16 s[8 / 2];
- u32 l[8 / 4];
- u64 d[8 / 8];
- } p_pci_iack; /* 0x030000-0x030007 */
-
- char _pad_030007[0x040000-0x030008];
-
-/* 0x040000-0x040007 -- PCIX Special Cycle */
- union {
- u8 c[8 / 1];
- u16 s[8 / 2];
- u32 l[8 / 4];
- u64 d[8 / 8];
- } p_pcix_cycle; /* 0x040000-0x040007 */
-};
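The c/s/l/d members of the configuration-space unions above are byte, halfword, word and doubleword views of the same bytes, so software picks the access width by member. A minimal sketch of such an access, assuming the enclosing structure is named struct pic and that pic points at a mapped register block (both assumptions, since the struct's opening lies outside this hunk):

	/* Sketch: read a 32-bit word at byte offset `off` from the type-0
	 * config space of device slot `dev` (0-7); l[] is the 32-bit view,
	 * so index by off / 4. */
	static inline u32
	pic_type0_cfg_read32(volatile struct pic *pic, int dev, unsigned int off)
	{
		return pic->p_type0_cfg_dev[dev].l[off / 4];
	}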
-
-#endif /* _ASM_IA64_SN_PCI_PIC_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/shub_mmr.h b/xen/include/asm-ia64/linux/asm/sn/shub_mmr.h
deleted file mode 100644
index 7de1d1d4b7..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/shub_mmr.h
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2001-2005 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_SHUB_MMR_H
-#define _ASM_IA64_SN_SHUB_MMR_H
-
-/* ==================================================================== */
-/* Register "SH_IPI_INT" */
-/* SHub Inter-Processor Interrupt Registers */
-/* ==================================================================== */
-#define SH1_IPI_INT __IA64_UL_CONST(0x0000000110000380)
-#define SH2_IPI_INT __IA64_UL_CONST(0x0000000010000380)
-
-/* SH_IPI_INT_TYPE */
-/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
-#define SH_IPI_INT_TYPE_SHFT 0
-#define SH_IPI_INT_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
-
-/* SH_IPI_INT_AGT */
-/* Description: Agent, must be 0 for SHub */
-#define SH_IPI_INT_AGT_SHFT 3
-#define SH_IPI_INT_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
-
-/* SH_IPI_INT_PID */
-/* Description: Processor ID, same setting as on targeted McKinley */
-#define SH_IPI_INT_PID_SHFT 4
-#define SH_IPI_INT_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
-
-/* SH_IPI_INT_BASE */
-/* Description: Optional interrupt vector area, 2MB aligned */
-#define SH_IPI_INT_BASE_SHFT 21
-#define SH_IPI_INT_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
-
-/* SH_IPI_INT_IDX */
-/* Description: Targeted McKinley interrupt vector */
-#define SH_IPI_INT_IDX_SHFT 52
-#define SH_IPI_INT_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
-
-/* SH_IPI_INT_SEND */
-/* Description: Send Interrupt Message to PI. This generates a pulse. */
-#define SH_IPI_INT_SEND_SHFT 63
-#define SH_IPI_INT_SEND_MASK __IA64_UL_CONST(0x8000000000000000)
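Taken together, the TYPE/AGT/PID/BASE/IDX/SEND fields describe one 64-bit value that software writes to SH_IPI_INT to post an interrupt. A hedged sketch, not the original code, leaving TYPE at 0 (a normal INT) and the optional BASE area unused:

	/* Sketch: compose an SH_IPI_INT value that delivers vector `vec`
	 * to the CPU with physical id `pid`; setting SEND generates the
	 * pulse that sends the message to the PI. */
	static inline u64 sh_ipi_int_value(u64 pid, u64 vec)
	{
		return (pid << SH_IPI_INT_PID_SHFT) |
		       (vec << SH_IPI_INT_IDX_SHFT) |
		       (1UL << SH_IPI_INT_SEND_SHFT);
	}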
-
-/* ==================================================================== */
-/* Register "SH_EVENT_OCCURRED" */
-/* SHub Interrupt Event Occurred */
-/* ==================================================================== */
-#define SH1_EVENT_OCCURRED __IA64_UL_CONST(0x0000000110010000)
-#define SH1_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000110010008)
-#define SH2_EVENT_OCCURRED __IA64_UL_CONST(0x0000000010010000)
-#define SH2_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000010010008)
-
-/* ==================================================================== */
-/* Register "SH_PI_CAM_CONTROL" */
-/* CRB CAM MMR Access Control */
-/* ==================================================================== */
-#define SH1_PI_CAM_CONTROL __IA64_UL_CONST(0x0000000120050300)
-
-/* ==================================================================== */
-/* Register "SH_SHUB_ID" */
-/* SHub ID Number */
-/* ==================================================================== */
-#define SH1_SHUB_ID __IA64_UL_CONST(0x0000000110060580)
-#define SH1_SHUB_ID_REVISION_SHFT 28
-#define SH1_SHUB_ID_REVISION_MASK __IA64_UL_CONST(0x00000000f0000000)
-
-/* ==================================================================== */
-/* Register "SH_RTC" */
-/* Real-time Clock */
-/* ==================================================================== */
-#define SH1_RTC __IA64_UL_CONST(0x00000001101c0000)
-#define SH2_RTC __IA64_UL_CONST(0x00000002101c0000)
-#define SH_RTC_MASK __IA64_UL_CONST(0x007fffffffffffff)
-
-/* ==================================================================== */
-/* Register "SH_PIO_WRITE_STATUS_0|1" */
-/* PIO Write Status for CPU 0 & 1 */
-/* ==================================================================== */
-#define SH1_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000120070200)
-#define SH1_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000120070280)
-#define SH2_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000020070200)
-#define SH2_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000020070280)
-#define SH2_PIO_WRITE_STATUS_2 __IA64_UL_CONST(0x0000000020070300)
-#define SH2_PIO_WRITE_STATUS_3 __IA64_UL_CONST(0x0000000020070380)
-
-/* SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK */
-/* Description: Deadlock response detected */
-#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT 1
-#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK \
- __IA64_UL_CONST(0x0000000000000002)
-
-/* SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT */
-/* Description: Count of currently pending PIO writes */
-#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT 56
-#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK \
- __IA64_UL_CONST(0x3f00000000000000)
-
-/* ==================================================================== */
-/* Register "SH_PIO_WRITE_STATUS_0_ALIAS" */
-/* ==================================================================== */
-#define SH1_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000120070208)
-#define SH2_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000020070208)
-
-/* ==================================================================== */
-/* Register "SH_EVENT_OCCURRED" */
-/* SHub Interrupt Event Occurred */
-/* ==================================================================== */
-/* SH_EVENT_OCCURRED_UART_INT */
-/* Description: Pending Junk Bus UART Interrupt */
-#define SH_EVENT_OCCURRED_UART_INT_SHFT 20
-#define SH_EVENT_OCCURRED_UART_INT_MASK __IA64_UL_CONST(0x0000000000100000)
-
-/* SH_EVENT_OCCURRED_IPI_INT */
-/* Description: Pending IPI Interrupt */
-#define SH_EVENT_OCCURRED_IPI_INT_SHFT 28
-#define SH_EVENT_OCCURRED_IPI_INT_MASK __IA64_UL_CONST(0x0000000010000000)
-
-/* SH_EVENT_OCCURRED_II_INT0 */
-/* Description: Pending II 0 Interrupt */
-#define SH_EVENT_OCCURRED_II_INT0_SHFT 29
-#define SH_EVENT_OCCURRED_II_INT0_MASK __IA64_UL_CONST(0x0000000020000000)
-
-/* SH_EVENT_OCCURRED_II_INT1 */
-/* Description: Pending II 1 Interrupt */
-#define SH_EVENT_OCCURRED_II_INT1_SHFT 30
-#define SH_EVENT_OCCURRED_II_INT1_MASK __IA64_UL_CONST(0x0000000040000000)
-
-/* SH2_EVENT_OCCURRED_EXTIO_INT2 */
-/* Description: Pending SHUB 2 EXT IO INT2 */
-#define SH2_EVENT_OCCURRED_EXTIO_INT2_SHFT 33
-#define SH2_EVENT_OCCURRED_EXTIO_INT2_MASK __IA64_UL_CONST(0x0000000200000000)
-
-/* SH2_EVENT_OCCURRED_EXTIO_INT3 */
-/* Description: Pending SHUB 2 EXT IO INT3 */
-#define SH2_EVENT_OCCURRED_EXTIO_INT3_SHFT 34
-#define SH2_EVENT_OCCURRED_EXTIO_INT3_MASK __IA64_UL_CONST(0x0000000400000000)
-
-#define SH_ALL_INT_MASK \
-	(SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \
-	 SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK | \
-	 SH2_EVENT_OCCURRED_EXTIO_INT2_MASK | SH2_EVENT_OCCURRED_EXTIO_INT3_MASK)
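Each mask above isolates one pending-event bit in a value read from SH_EVENT_OCCURRED, so decoding is a plain bit test, for example:

	/* Sketch: nonzero if an IPI is pending in a SH_EVENT_OCCURRED value. */
	static inline int sh_ipi_int_pending(u64 event_occurred)
	{
		return (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) != 0;
	}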
-
-
-/* ==================================================================== */
-/* LEDS */
-/* ==================================================================== */
-#define SH1_REAL_JUNK_BUS_LED0 0x7fed00000UL
-#define SH1_REAL_JUNK_BUS_LED1 0x7fed10000UL
-#define SH1_REAL_JUNK_BUS_LED2 0x7fed20000UL
-#define SH1_REAL_JUNK_BUS_LED3 0x7fed30000UL
-
-#define SH2_REAL_JUNK_BUS_LED0 0xf0000000UL
-#define SH2_REAL_JUNK_BUS_LED1 0xf0010000UL
-#define SH2_REAL_JUNK_BUS_LED2 0xf0020000UL
-#define SH2_REAL_JUNK_BUS_LED3 0xf0030000UL
-
-/* ==================================================================== */
-/* Register "SH1_PTC_0" */
-/*                 Purge Translation Cache Message Configuration Information  */
-/* ==================================================================== */
-#define SH1_PTC_0 __IA64_UL_CONST(0x00000001101a0000)
-
-/* SH1_PTC_0_A */
-/* Description: Type */
-#define SH1_PTC_0_A_SHFT 0
-
-/* SH1_PTC_0_PS */
-/* Description: Page Size */
-#define SH1_PTC_0_PS_SHFT 2
-
-/* SH1_PTC_0_RID */
-/* Description: Region ID */
-#define SH1_PTC_0_RID_SHFT 8
-
-/* SH1_PTC_0_START */
-/* Description: Start */
-#define SH1_PTC_0_START_SHFT 63
-
-/* ==================================================================== */
-/* Register "SH1_PTC_1" */
-/*                 Purge Translation Cache Message Configuration Information  */
-/* ==================================================================== */
-#define SH1_PTC_1 __IA64_UL_CONST(0x00000001101a0080)
-
-/* SH1_PTC_1_START */
-/* Description: PTC_1 Start */
-#define SH1_PTC_1_START_SHFT 63
-
-/* ==================================================================== */
-/* Register "SH2_PTC" */
-/*                 Purge Translation Cache Message Configuration Information  */
-/* ==================================================================== */
-#define SH2_PTC __IA64_UL_CONST(0x0000000170000000)
-
-/* SH2_PTC_A */
-/* Description: Type */
-#define SH2_PTC_A_SHFT 0
-
-/* SH2_PTC_PS */
-/* Description: Page Size */
-#define SH2_PTC_PS_SHFT 2
-
-/* SH2_PTC_RID */
-/* Description: Region ID */
-#define SH2_PTC_RID_SHFT 4
-
-/* SH2_PTC_START */
-/* Description: Start */
-#define SH2_PTC_START_SHFT 63
-
-/* SH2_PTC_ADDR */
-/* Description: Address */
-#define SH2_PTC_ADDR_SHFT				4
-#define SH2_PTC_ADDR_MASK __IA64_UL_CONST(0x1ffffffffffff000)
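The PTC shift values above are combined into the single MMR write that launches a global purge. A sketch in the style of the sn2 TLB-flush code; the exact field semantics beyond the SHFT names are assumptions here:

	/* Sketch: SH1_PTC_0 value starting a purge for region id `rid`
	 * with encoded page size `ps`; A selects the purge type and START
	 * launches the operation (both assumed to be written as 1). */
	static inline u64 sh1_ptc_0_value(u64 rid, u64 ps)
	{
		return (1UL << SH1_PTC_0_A_SHFT)   |
		       (ps  << SH1_PTC_0_PS_SHFT)  |
		       (rid << SH1_PTC_0_RID_SHFT) |
		       (1UL << SH1_PTC_0_START_SHFT);
	}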
-
-/* ==================================================================== */
-/* Register "SH_RTC1_INT_CONFIG" */
-/* SHub RTC 1 Interrupt Config Registers */
-/* ==================================================================== */
-
-#define SH1_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000110001480)
-#define SH2_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000010001480)
-#define SH_RTC1_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
-#define SH_RTC1_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
-
-/* SH_RTC1_INT_CONFIG_TYPE */
-/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
-#define SH_RTC1_INT_CONFIG_TYPE_SHFT 0
-#define SH_RTC1_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
-
-/* SH_RTC1_INT_CONFIG_AGT */
-/* Description: Agent, must be 0 for SHub */
-#define SH_RTC1_INT_CONFIG_AGT_SHFT 3
-#define SH_RTC1_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
-
-/* SH_RTC1_INT_CONFIG_PID */
-/* Description: Processor ID, same setting as on targeted McKinley */
-#define SH_RTC1_INT_CONFIG_PID_SHFT 4
-#define SH_RTC1_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
-
-/* SH_RTC1_INT_CONFIG_BASE */
-/* Description: Optional interrupt vector area, 2MB aligned */
-#define SH_RTC1_INT_CONFIG_BASE_SHFT 21
-#define SH_RTC1_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
-
-/* SH_RTC1_INT_CONFIG_IDX */
-/* Description: Targeted McKinley interrupt vector */
-#define SH_RTC1_INT_CONFIG_IDX_SHFT 52
-#define SH_RTC1_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
-
-/* ==================================================================== */
-/* Register "SH_RTC1_INT_ENABLE" */
-/* SHub RTC 1 Interrupt Enable Registers */
-/* ==================================================================== */
-
-#define SH1_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000110001500)
-#define SH2_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000010001500)
-#define SH_RTC1_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
-#define SH_RTC1_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
-
-/* SH_RTC1_INT_ENABLE_RTC1_ENABLE */
-/* Description: Enable RTC 1 Interrupt */
-#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_SHFT 0
-#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK \
- __IA64_UL_CONST(0x0000000000000001)
-
-/* ==================================================================== */
-/* Register "SH_RTC2_INT_CONFIG" */
-/* SHub RTC 2 Interrupt Config Registers */
-/* ==================================================================== */
-
-#define SH1_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000110001580)
-#define SH2_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000010001580)
-#define SH_RTC2_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
-#define SH_RTC2_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
-
-/* SH_RTC2_INT_CONFIG_TYPE */
-/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
-#define SH_RTC2_INT_CONFIG_TYPE_SHFT 0
-#define SH_RTC2_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
-
-/* SH_RTC2_INT_CONFIG_AGT */
-/* Description: Agent, must be 0 for SHub */
-#define SH_RTC2_INT_CONFIG_AGT_SHFT 3
-#define SH_RTC2_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
-
-/* SH_RTC2_INT_CONFIG_PID */
-/* Description: Processor ID, same setting as on targeted McKinley */
-#define SH_RTC2_INT_CONFIG_PID_SHFT 4
-#define SH_RTC2_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
-
-/* SH_RTC2_INT_CONFIG_BASE */
-/* Description: Optional interrupt vector area, 2MB aligned */
-#define SH_RTC2_INT_CONFIG_BASE_SHFT 21
-#define SH_RTC2_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
-
-/* SH_RTC2_INT_CONFIG_IDX */
-/* Description: Targeted McKinley interrupt vector */
-#define SH_RTC2_INT_CONFIG_IDX_SHFT 52
-#define SH_RTC2_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
-
-/* ==================================================================== */
-/* Register "SH_RTC2_INT_ENABLE" */
-/* SHub RTC 2 Interrupt Enable Registers */
-/* ==================================================================== */
-
-#define SH1_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000110001600)
-#define SH2_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000010001600)
-#define SH_RTC2_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
-#define SH_RTC2_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
-
-/* SH_RTC2_INT_ENABLE_RTC2_ENABLE */
-/* Description: Enable RTC 2 Interrupt */
-#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_SHFT 0
-#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_MASK \
- __IA64_UL_CONST(0x0000000000000001)
-
-/* ==================================================================== */
-/* Register "SH_RTC3_INT_CONFIG" */
-/* SHub RTC 3 Interrupt Config Registers */
-/* ==================================================================== */
-
-#define SH1_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000110001680)
-#define SH2_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000010001680)
-#define SH_RTC3_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
-#define SH_RTC3_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
-
-/* SH_RTC3_INT_CONFIG_TYPE */
-/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
-#define SH_RTC3_INT_CONFIG_TYPE_SHFT 0
-#define SH_RTC3_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
-
-/* SH_RTC3_INT_CONFIG_AGT */
-/* Description: Agent, must be 0 for SHub */
-#define SH_RTC3_INT_CONFIG_AGT_SHFT 3
-#define SH_RTC3_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
-
-/* SH_RTC3_INT_CONFIG_PID */
-/* Description: Processor ID, same setting as on targeted McKinley */
-#define SH_RTC3_INT_CONFIG_PID_SHFT 4
-#define SH_RTC3_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
-
-/* SH_RTC3_INT_CONFIG_BASE */
-/* Description: Optional interrupt vector area, 2MB aligned */
-#define SH_RTC3_INT_CONFIG_BASE_SHFT 21
-#define SH_RTC3_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
-
-/* SH_RTC3_INT_CONFIG_IDX */
-/* Description: Targeted McKinley interrupt vector */
-#define SH_RTC3_INT_CONFIG_IDX_SHFT 52
-#define SH_RTC3_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
-
-/* ==================================================================== */
-/* Register "SH_RTC3_INT_ENABLE" */
-/* SHub RTC 3 Interrupt Enable Registers */
-/* ==================================================================== */
-
-#define SH1_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000110001700)
-#define SH2_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000010001700)
-#define SH_RTC3_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
-#define SH_RTC3_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
-
-/* SH_RTC3_INT_ENABLE_RTC3_ENABLE */
-/* Description: Enable RTC 3 Interrupt */
-#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_SHFT 0
-#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_MASK \
- __IA64_UL_CONST(0x0000000000000001)
-
-/* SH_EVENT_OCCURRED_RTC1_INT */
-/* Description: Pending RTC 1 Interrupt */
-#define SH_EVENT_OCCURRED_RTC1_INT_SHFT 24
-#define SH_EVENT_OCCURRED_RTC1_INT_MASK __IA64_UL_CONST(0x0000000001000000)
-
-/* SH_EVENT_OCCURRED_RTC2_INT */
-/* Description: Pending RTC 2 Interrupt */
-#define SH_EVENT_OCCURRED_RTC2_INT_SHFT 25
-#define SH_EVENT_OCCURRED_RTC2_INT_MASK __IA64_UL_CONST(0x0000000002000000)
-
-/* SH_EVENT_OCCURRED_RTC3_INT */
-/* Description: Pending RTC 3 Interrupt */
-#define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26
-#define SH_EVENT_OCCURRED_RTC3_INT_MASK __IA64_UL_CONST(0x0000000004000000)
-
-/* ==================================================================== */
-/* Register "SH_IPI_ACCESS" */
-/* CPU interrupt Access Permission Bits */
-/* ==================================================================== */
-
-#define SH1_IPI_ACCESS __IA64_UL_CONST(0x0000000110060480)
-#define SH2_IPI_ACCESS0 __IA64_UL_CONST(0x0000000010060c00)
-#define SH2_IPI_ACCESS1 __IA64_UL_CONST(0x0000000010060c80)
-#define SH2_IPI_ACCESS2 __IA64_UL_CONST(0x0000000010060d00)
-#define SH2_IPI_ACCESS3 __IA64_UL_CONST(0x0000000010060d80)
-
-/* ==================================================================== */
-/* Register "SH_INT_CMPB" */
-/* RTC Compare Value for Processor B */
-/* ==================================================================== */
-
-#define SH1_INT_CMPB __IA64_UL_CONST(0x00000001101b0080)
-#define SH2_INT_CMPB __IA64_UL_CONST(0x00000000101b0080)
-#define SH_INT_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff)
-#define SH_INT_CMPB_INIT __IA64_UL_CONST(0x0000000000000000)
-
-/* SH_INT_CMPB_REAL_TIME_CMPB */
-/* Description: Real Time Clock Compare */
-#define SH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
-#define SH_INT_CMPB_REAL_TIME_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff)
-
-/* ==================================================================== */
-/* Register "SH_INT_CMPC" */
-/* RTC Compare Value for Processor C */
-/* ==================================================================== */
-
-#define SH1_INT_CMPC __IA64_UL_CONST(0x00000001101b0100)
-#define SH2_INT_CMPC __IA64_UL_CONST(0x00000000101b0100)
-#define SH_INT_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff)
-#define SH_INT_CMPC_INIT __IA64_UL_CONST(0x0000000000000000)
-
-/* SH_INT_CMPC_REAL_TIME_CMPC */
-/* Description: Real Time Clock Compare */
-#define SH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
-#define SH_INT_CMPC_REAL_TIME_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff)
-
-/* ==================================================================== */
-/* Register "SH_INT_CMPD" */
-/* RTC Compare Value for Processor D */
-/* ==================================================================== */
-
-#define SH1_INT_CMPD __IA64_UL_CONST(0x00000001101b0180)
-#define SH2_INT_CMPD __IA64_UL_CONST(0x00000000101b0180)
-#define SH_INT_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff)
-#define SH_INT_CMPD_INIT __IA64_UL_CONST(0x0000000000000000)
-
-/* SH_INT_CMPD_REAL_TIME_CMPD */
-/* Description: Real Time Clock Compare */
-#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
-#define SH_INT_CMPD_REAL_TIME_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff)
-
-/* ==================================================================== */
-/* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */
-/* privilege vector for acc=0 */
-/* ==================================================================== */
-#define SH1_MD_DQLP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100030300)
-
-/* ==================================================================== */
-/* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */
-/* privilege vector for acc=0 */
-/* ==================================================================== */
-#define SH1_MD_DQRP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100050300)
-
-/* ==================================================================== */
-/* Some MMRs are functionally identical (or close enough) on both SHUB1 */
-/* and SHUB2 that it makes sense to define a generic name for the MMR. */
-/* It is acceptable to use (for example) SH_IPI_INT to reference the   */
-/* IPI MMR. The value of SH_IPI_INT is determined at runtime based     */
-/* on the type of the SHUB. Do not use these #defines in performance */
-/* critical code or loops - there is a small performance penalty. */
-/* ==================================================================== */
-#define shubmmr(a,b) (is_shub2() ? a##2_##b : a##1_##b)
-
-#define SH_REAL_JUNK_BUS_LED0 shubmmr(SH, REAL_JUNK_BUS_LED0)
-#define SH_IPI_INT shubmmr(SH, IPI_INT)
-#define SH_EVENT_OCCURRED shubmmr(SH, EVENT_OCCURRED)
-#define SH_EVENT_OCCURRED_ALIAS shubmmr(SH, EVENT_OCCURRED_ALIAS)
-#define SH_RTC shubmmr(SH, RTC)
-#define SH_RTC1_INT_CONFIG shubmmr(SH, RTC1_INT_CONFIG)
-#define SH_RTC1_INT_ENABLE shubmmr(SH, RTC1_INT_ENABLE)
-#define SH_RTC2_INT_CONFIG shubmmr(SH, RTC2_INT_CONFIG)
-#define SH_RTC2_INT_ENABLE shubmmr(SH, RTC2_INT_ENABLE)
-#define SH_RTC3_INT_CONFIG shubmmr(SH, RTC3_INT_CONFIG)
-#define SH_RTC3_INT_ENABLE shubmmr(SH, RTC3_INT_ENABLE)
-#define SH_INT_CMPB shubmmr(SH, INT_CMPB)
-#define SH_INT_CMPC shubmmr(SH, INT_CMPC)
-#define SH_INT_CMPD shubmmr(SH, INT_CMPD)
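Since every use of a generic name re-runs the is_shub2() test inside shubmmr(), the way to honor the performance note above is to resolve the address once and reuse it. A sketch, where mmr_read64() is a hypothetical stand-in for the platform's MMR accessor:

	extern u64 mmr_read64(u64 mmr);		/* hypothetical accessor */

	/* Sketch: spin for `ticks` RTC ticks, evaluating is_shub2() once
	 * rather than on every loop iteration. */
	static void rtc_spin(u64 ticks)
	{
		u64 rtc = SH_RTC;		/* one runtime SHUB1/SHUB2 test */
		u64 start = mmr_read64(rtc) & SH_RTC_MASK;

		while (((mmr_read64(rtc) - start) & SH_RTC_MASK) < ticks)
			;
	}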
-
-/* ========================================================================== */
-/* Register "SH2_BT_ENG_CSR_0" */
-/* Engine 0 Control and Status Register */
-/* ========================================================================== */
-
-#define SH2_BT_ENG_CSR_0 __IA64_UL_CONST(0x0000000030040000)
-#define SH2_BT_ENG_SRC_ADDR_0 __IA64_UL_CONST(0x0000000030040080)
-#define SH2_BT_ENG_DEST_ADDR_0 __IA64_UL_CONST(0x0000000030040100)
-#define SH2_BT_ENG_NOTIF_ADDR_0 __IA64_UL_CONST(0x0000000030040180)
-
-/* ========================================================================== */
-/* BTE interfaces 1-3 */
-/* ========================================================================== */
-
-#define SH2_BT_ENG_CSR_1 __IA64_UL_CONST(0x0000000030050000)
-#define SH2_BT_ENG_CSR_2 __IA64_UL_CONST(0x0000000030060000)
-#define SH2_BT_ENG_CSR_3 __IA64_UL_CONST(0x0000000030070000)
-
-#endif /* _ASM_IA64_SN_SHUB_MMR_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/shubio.h b/xen/include/asm-ia64/linux/asm/sn/shubio.h
deleted file mode 100644
index 22a6f18a53..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/shubio.h
+++ /dev/null
@@ -1,3358 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_SHUBIO_H
-#define _ASM_IA64_SN_SHUBIO_H
-
-#define HUB_WIDGET_ID_MAX 0xf
-#define IIO_NUM_ITTES 7
-#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
-
-#define IIO_WID 0x00400000 /* Crosstalk Widget Identification */
- /* This register is also accessible from
- * Crosstalk at address 0x0. */
-#define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */
-#define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */
-#define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */
-#define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */
-#define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */
-#define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */
-#define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */
-#define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */
-#define IIO_ILLR 0x00400130 /* IO LLP Log Register */
-#define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */
-
-#define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */
-#define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */
-
-#define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */
-#define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */
-
-#define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */
-#define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */
-#define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */
-#define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */
-#define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */
-#define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */
-#define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */
-
-#define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */
-#define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */
-#define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */
-#define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */
-#define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */
-#define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */
-#define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */
-#define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */
-#define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */
-
-#define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */
-#define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */
-#define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */
-#define IIO_IECLR 0x004001F8 /* IO Error Clear Register */
-#define IIO_IBCR 0x00400200 /* IO BTE Control Register */
-
-#define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */
-#define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */
-
-#define IIO_ILCT 0x00400218 /* IO LLP Channel Test */
-
-#define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */
-#define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */
-
-#define IIO_ISLAPR	0x00400230	/* IO SXB Local Access Protection Register */
-#define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */
-
-#define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */
-#define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */
-#define IIO_IWC 0x00400250 /* IO Wrapper Control Register */
-#define IIO_IWS 0x00400258 /* IO Wrapper Status Register */
-#define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */
-
-#define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */
-
-#define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */
-#define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */
-#define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */
-#define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */
-#define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */
-#define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */
-#define IIO_IPRTE6_A 0x00400338 /* IO PIO Read Address Table Entry 6, Part A */
-#define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */
-
-#define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */
-#define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */
-#define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */
-#define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */
-#define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */
-#define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */
-#define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */
-#define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */
-
-#define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */
-#define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */
-#define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */
-#define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */
-#define IIO_ICMR 0x004003A8 /* IO CRB Management Register */
-#define IIO_ICCR 0x004003B0 /* IO CRB Control Register */
-#define IIO_ICTO 0x004003B8 /* IO CRB Timeout */
-#define IIO_ICTP	0x004003C0	/* IO CRB Timeout Prescaler */
-
-#define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */
-#define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */
-#define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */
-#define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */
-#define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */
-
-#define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */
-#define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */
-#define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */
-#define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */
-#define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */
-
-#define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */
-#define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */
-#define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */
-#define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */
-#define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */
-
-#define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */
-#define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */
-#define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */
-#define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */
-#define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */
-
-#define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */
-#define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */
-#define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */
-#define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */
-#define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */
-
-#define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */
-#define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */
-#define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */
-#define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */
-#define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */
-
-#define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */
-#define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */
-#define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */
-#define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */
-#define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */
-
-#define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */
-#define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */
-#define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */
-#define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */
-#define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */
-
-#define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */
-#define IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */
-#define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */
-#define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */
-#define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */
-
-#define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */
-#define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */
-#define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */
-#define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */
-#define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */
-
-#define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */
-#define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */
-#define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */
-#define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */
-#define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */
-
-#define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */
-#define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */
-#define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */
-#define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */
-#define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */
-
-#define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */
-#define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */
-#define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */
-#define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */
-#define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */
-
-#define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */
-#define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */
-#define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */
-#define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */
-#define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */
-
-#define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */
-#define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */
-#define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */
-#define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */
-#define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */
-
-#define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */
-#define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */
-#define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */
-
-#define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */
-
-#define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */
-#define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */
-#define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */
-#define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */
-#define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */
-#define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */
-#define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */
-#define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */
-#define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */
-#define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */
-#define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */
-#define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */
-
-#define IIO_IPCR 0x00430000 /* IO Performance Control */
-#define IIO_IPPR 0x00430008 /* IO Performance Profiling */
-
-/************************************************************************
- * *
- * Description: This register echoes some information from the *
- * LB_REV_ID register. It is available through Crosstalk as described *
- * above. The REV_NUM and MFG_NUM fields receive their values from *
- * the REVISION and MANUFACTURER fields in the LB_REV_ID register. *
- * The PART_NUM field's value is the Crosstalk device ID number that *
- * Steve Miller assigned to the SHub chip. *
- * *
- ************************************************************************/
-
-typedef union ii_wid_u {
- u64 ii_wid_regval;
- struct {
- u64 w_rsvd_1:1;
- u64 w_mfg_num:11;
- u64 w_part_num:16;
- u64 w_rev_num:4;
- u64 w_rsvd:32;
- } ii_wid_fld_s;
-} ii_wid_u_t;
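Every register in this file repeats this raw-value/bitfield union pattern, so a raw 64-bit MMR read can be decoded by field name. For instance, a sketch against ii_wid_u_t:

	/* Sketch: extract the Crosstalk part number from a raw IIO_WID read. */
	static inline unsigned int ii_wid_part_num(u64 raw)
	{
		ii_wid_u_t wid;

		wid.ii_wid_regval = raw;
		return wid.ii_wid_fld_s.w_part_num;
	}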
-
-/************************************************************************
- * *
- * The fields in this register are set upon detection of an error *
- * and cleared by various mechanisms, as explained in the *
- * description. *
- * *
- ************************************************************************/
-
-typedef union ii_wstat_u {
- u64 ii_wstat_regval;
- struct {
- u64 w_pending:4;
- u64 w_xt_crd_to:1;
- u64 w_xt_tail_to:1;
- u64 w_rsvd_3:3;
- u64 w_tx_mx_rty:1;
- u64 w_rsvd_2:6;
- u64 w_llp_tx_cnt:8;
- u64 w_rsvd_1:8;
- u64 w_crazy:1;
- u64 w_rsvd:31;
- } ii_wstat_fld_s;
-} ii_wstat_u_t;
-
-/************************************************************************
- * *
- * Description: This is a read-write enabled register. It controls *
- * various aspects of the Crosstalk flow control. *
- * *
- ************************************************************************/
-
-typedef union ii_wcr_u {
- u64 ii_wcr_regval;
- struct {
- u64 w_wid:4;
- u64 w_tag:1;
- u64 w_rsvd_1:8;
- u64 w_dst_crd:3;
- u64 w_f_bad_pkt:1;
- u64 w_dir_con:1;
- u64 w_e_thresh:5;
- u64 w_rsvd:41;
- } ii_wcr_fld_s;
-} ii_wcr_u_t;
-
-/************************************************************************
- * *
- * Description: This register's value is a bit vector that guards *
- * access to local registers within the II as well as to external *
- * Crosstalk widgets. Each bit in the register corresponds to a *
- * particular region in the system; a region consists of one, two or *
- * four nodes (depending on the value of the REGION_SIZE field in the *
- * LB_REV_ID register, which is documented in Section 8.3.1.1). The *
- * protection provided by this register applies to PIO read *
- * operations as well as PIO write operations. The II will perform a *
- * PIO read or write request only if the bit for the requestor's *
- * region is set; otherwise, the II will not perform the requested *
- * operation and will return an error response. When a PIO read or *
- * write request targets an external Crosstalk widget, then not only *
- * must the bit for the requestor's region be set in the ILAPR, but *
- * also the target widget's bit in the IOWA register must be set in *
- * order for the II to perform the requested operation; otherwise, *
- * the II will return an error response. Hence, the protection *
- * provided by the IOWA register supplements the protection provided *
- * by the ILAPR for requests that target external Crosstalk widgets. *
- * This register itself can be accessed only by the nodes whose *
- * region ID bits are enabled in this same register. It can also be *
- * accessed through the IAlias space by the local processors. *
- * The reset value of this register allows access by all nodes. *
- * *
- ************************************************************************/
-
-typedef union ii_ilapr_u {
- u64 ii_ilapr_regval;
- struct {
- u64 i_region:64;
- } ii_ilapr_fld_s;
-} ii_ilapr_u_t;
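Because the ILAPR is one enable bit per region, the check the II applies before honoring a PIO request reduces to a single bit probe, sketched here:

	/* Sketch: nonzero if the requestor's region may access local II
	 * registers, per the ILAPR description above. */
	static inline int ii_region_allowed(ii_ilapr_u_t ilapr, unsigned int region)
	{
		return (ilapr.ii_ilapr_fld_s.i_region >> region) & 1;
	}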
-
-/************************************************************************
- * *
- * Description: A write to this register of the 64-bit value *
- * "SGIrules" in ASCII, will cause the bit in the ILAPR register *
- * corresponding to the region of the requestor to be set (allow *
- * access). A write of any other value will be ignored. Access *
- * protection for this register is "SGIrules". *
- * This register can also be accessed through the IAlias space. *
- * However, this access will not change the access permissions in the *
- * ILAPR. *
- * *
- ************************************************************************/
-
-typedef union ii_ilapo_u {
- u64 ii_ilapo_regval;
- struct {
- u64 i_io_ovrride:64;
- } ii_ilapo_fld_s;
-} ii_ilapo_u_t;
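The override value is the ASCII string "SGIrules" reinterpreted as a 64-bit integer. A sketch of building it; which character lands in the low byte is an assumption here, not taken from the hardware spec:

	/* Sketch: pack "SGIrules" into a u64, first character in the low
	 * byte (byte order is an assumption). */
	static inline u64 ii_ilapo_override_value(void)
	{
		const char key[8] = { 'S', 'G', 'I', 'r', 'u', 'l', 'e', 's' };
		u64 v = 0;
		int i;

		for (i = 7; i >= 0; i--)
			v = (v << 8) | (u64)key[i];
		return v;
	}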
-
-/************************************************************************
- * *
- * This register qualifies all the PIO and Graphics writes launched *
- * from the SHUB towards a widget. *
- * *
- ************************************************************************/
-
-typedef union ii_iowa_u {
- u64 ii_iowa_regval;
- struct {
- u64 i_w0_oac:1;
- u64 i_rsvd_1:7;
- u64 i_wx_oac:8;
- u64 i_rsvd:48;
- } ii_iowa_fld_s;
-} ii_iowa_u_t;
-
-/************************************************************************
- * *
- * Description: This register qualifies all the requests launched *
- * from a widget towards the Shub. This register is intended to be *
- * used by software in case of misbehaving widgets. *
- * *
- * *
- ************************************************************************/
-
-typedef union ii_iiwa_u {
- u64 ii_iiwa_regval;
- struct {
- u64 i_w0_iac:1;
- u64 i_rsvd_1:7;
- u64 i_wx_iac:8;
- u64 i_rsvd:48;
- } ii_iiwa_fld_s;
-} ii_iiwa_u_t;
-
-/************************************************************************
- * *
- * Description: This register qualifies all the operations launched *
- * from a widget towards the SHub. It allows individual access *
- * control for up to 8 devices per widget. A device refers to an       *
- * individual DMA master hosted by a widget.                           *
- * The bits in each field of this register are cleared by the Shub     *
- * upon detection of an error which requires the device to be          *
- * disabled. These fields assume that 0 <= TNUM <= 7 (i.e., Bridge-centric *
- * Crosstalk). Whether or not a device has access rights to this *
- * Shub is determined by an AND of the device enable bit in the *
- * appropriate field of this register and the corresponding bit in *
- * the Wx_IAC field (for the widget which this device belongs to). *
- * The bits in this field are set by writing a 1 to them. Incoming *
- * replies from Crosstalk are not subject to this access control *
- * mechanism. *
- * *
- ************************************************************************/
-
-typedef union ii_iidem_u {
- u64 ii_iidem_regval;
- struct {
- u64 i_w8_dxs:8;
- u64 i_w9_dxs:8;
- u64 i_wa_dxs:8;
- u64 i_wb_dxs:8;
- u64 i_wc_dxs:8;
- u64 i_wd_dxs:8;
- u64 i_we_dxs:8;
- u64 i_wf_dxs:8;
- } ii_iidem_fld_s;
-} ii_iidem_u_t;
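The access decision described above, the AND of a device's IIDEM enable bit with its widget's Wx_IAC bit, can be sketched directly from the two unions; the mapping of widget 8 to the low byte follows the bitfield order above:

	/* Sketch: nonzero if device `dev` (0-7) behind widget `w` (8-0xf)
	 * currently has inbound access rights to this SHub. */
	static inline int ii_device_has_access(ii_iidem_u_t dem, ii_iiwa_u_t iwa,
					       unsigned int w, unsigned int dev)
	{
		u64 dxs = dem.ii_iidem_regval >> ((w - 8) * 8);	/* 8 bits/widget */
		int widget_ok = (iwa.ii_iiwa_fld_s.i_wx_iac >> (w - 8)) & 1;

		return widget_ok && ((dxs >> dev) & 1);
	}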
-
-/************************************************************************
- * *
- * This register contains the various programmable fields necessary *
- * for controlling and observing the LLP signals. *
- * *
- ************************************************************************/
-
-typedef union ii_ilcsr_u {
- u64 ii_ilcsr_regval;
- struct {
- u64 i_nullto:6;
- u64 i_rsvd_4:2;
- u64 i_wrmrst:1;
- u64 i_rsvd_3:1;
- u64 i_llp_en:1;
- u64 i_bm8:1;
- u64 i_llp_stat:2;
- u64 i_remote_power:1;
- u64 i_rsvd_2:1;
- u64 i_maxrtry:10;
- u64 i_d_avail_sel:2;
- u64 i_rsvd_1:4;
- u64 i_maxbrst:10;
- u64 i_rsvd:22;
-
- } ii_ilcsr_fld_s;
-} ii_ilcsr_u_t;
-
-/************************************************************************
- * *
- * This is simply a status register that monitors the LLP error        *
- * rate. *
- * *
- ************************************************************************/
-
-typedef union ii_illr_u {
- u64 ii_illr_regval;
- struct {
- u64 i_sn_cnt:16;
- u64 i_cb_cnt:16;
- u64 i_rsvd:32;
- } ii_illr_fld_s;
-} ii_illr_u_t;
-
-/************************************************************************
- * *
- * Description: All II-detected non-BTE error interrupts are *
- * specified via this register. *
- * NOTE: The PI interrupt register address is hardcoded in the II. If *
- * PI_ID==0, then the II sends an interrupt request (Duplonet PWRI *
- * packet) to address offset 0x0180_0090 within the local register *
- * address space of PI0 on the node specified by the NODE field. If *
- * PI_ID==1, then the II sends the interrupt request to address *
- * offset 0x01A0_0090 within the local register address space of PI1 *
- * on the node specified by the NODE field. *
- * *
- ************************************************************************/
-
-typedef union ii_iidsr_u {
- u64 ii_iidsr_regval;
- struct {
- u64 i_level:8;
- u64 i_pi_id:1;
- u64 i_node:11;
- u64 i_rsvd_3:4;
- u64 i_enable:1;
- u64 i_rsvd_2:3;
- u64 i_int_sent:2;
- u64 i_rsvd_1:2;
- u64 i_pi0_forward_int:1;
- u64 i_pi1_forward_int:1;
- u64 i_rsvd:30;
- } ii_iidsr_fld_s;
-} ii_iidsr_u_t;
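The hard-coded targeting in the note above reduces to choosing between two fixed local-register offsets, as in this sketch:

	/* Sketch: offset of the PI interrupt register the II will write,
	 * selected by the PI_ID field as described above. */
	static inline u64 ii_int_dest_offset(ii_iidsr_u_t dsr)
	{
		return dsr.ii_iidsr_fld_s.i_pi_id ? 0x01A00090UL	/* PI1 */
						  : 0x01800090UL;	/* PI0 */
	}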
-
-/************************************************************************
- * *
- * There are two instances of this register. This register is used *
- * for matching up the incoming responses from the graphics widget to *
- * the processor that initiated the graphics operation. The *
- * write-responses are converted to graphics credits and returned to *
- * the processor so that the processor interface can manage the flow *
- * control. *
- * *
- ************************************************************************/
-
-typedef union ii_igfx0_u {
- u64 ii_igfx0_regval;
- struct {
- u64 i_w_num:4;
- u64 i_pi_id:1;
- u64 i_n_num:12;
- u64 i_p_num:1;
- u64 i_rsvd:46;
- } ii_igfx0_fld_s;
-} ii_igfx0_u_t;
-
-/************************************************************************
- * *
- * There are two instances of this register. This register is used *
- * for matching up the incoming responses from the graphics widget to *
- * the processor that initiated the graphics operation. The *
- * write-responses are converted to graphics credits and returned to *
- * the processor so that the processor interface can manage the flow *
- * control. *
- * *
- ************************************************************************/
-
-typedef union ii_igfx1_u {
- u64 ii_igfx1_regval;
- struct {
- u64 i_w_num:4;
- u64 i_pi_id:1;
- u64 i_n_num:12;
- u64 i_p_num:1;
- u64 i_rsvd:46;
- } ii_igfx1_fld_s;
-} ii_igfx1_u_t;
-
-/************************************************************************
- * *
- * There are two instances of this register. These registers are       *
- * used as scratch registers for software use. *
- * *
- ************************************************************************/
-
-typedef union ii_iscr0_u {
- u64 ii_iscr0_regval;
- struct {
- u64 i_scratch:64;
- } ii_iscr0_fld_s;
-} ii_iscr0_u_t;
-
-/************************************************************************
- * *
- * There are two instances of this register. These registers are       *
- * used as scratch registers for software use. *
- * *
- ************************************************************************/
-
-typedef union ii_iscr1_u {
- u64 ii_iscr1_regval;
- struct {
- u64 i_scratch:64;
- } ii_iscr1_fld_s;
-} ii_iscr1_u_t;
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Shub Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space         *
- * addressable by the SHub is thus the lower 16 GBytes per widget      *
- * (M-mode), only 7/32nds of this space can be accessed.               *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
- * Crosstalk space addressable by the SHub is thus the lower           *
- * 8 GBytes per widget (N-mode), only 7/32nds of this space can be     *
- * accessed.                                                           *
- * *
- ************************************************************************/
-
-typedef union ii_itte1_u {
- u64 ii_itte1_regval;
- struct {
- u64 i_offset:5;
- u64 i_rsvd_1:3;
- u64 i_w_num:4;
- u64 i_iosp:1;
- u64 i_rsvd:51;
- } ii_itte1_fld_s;
-} ii_itte1_u_t;
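A worked sketch of the M-mode concatenation described above: the five OFFSET bits become Crosstalk[33:29] above SysAD[28:0], with bits [47:34] left zero. The six ITTE registers that follow use the identical scheme.

	/* Sketch: form the Crosstalk[33:0] address for an M-mode
	 * big-window access from an ITTE value and a SysAD address. */
	static inline u64 itte_m_mode_xtalk_addr(ii_itte1_u_t itte, u64 sysad)
	{
		return ((u64)itte.ii_itte1_fld_s.i_offset << 29) |
		       (sysad & ((1UL << 29) - 1));	/* SysAD[28:0] */
	}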
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Shub Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space         *
- * addressable by the SHub is thus the lower 16 GBytes per widget      *
- * (M-mode), only 7/32nds of this space can be accessed.               *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
- * Crosstalk space addressable by the SHub is thus the lower           *
- * 8 GBytes per widget (N-mode), only 7/32nds of this space can be     *
- * accessed.                                                           *
- * *
- ************************************************************************/
-
-typedef union ii_itte2_u {
- u64 ii_itte2_regval;
- struct {
- u64 i_offset:5;
- u64 i_rsvd_1:3;
- u64 i_w_num:4;
- u64 i_iosp:1;
- u64 i_rsvd:51;
- } ii_itte2_fld_s;
-} ii_itte2_u_t;
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Shub Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space         *
- * addressable by the SHub is thus the lower 16 GBytes per widget      *
- * (M-mode), only 7/32nds of this space can be accessed.               *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
- * Crosstalk space addressable by the SHub is thus the lower           *
- * 8 GBytes per widget (N-mode), only 7/32nds of this space can be     *
- * accessed.                                                           *
- * *
- ************************************************************************/
-
-typedef union ii_itte3_u {
- u64 ii_itte3_regval;
- struct {
- u64 i_offset:5;
- u64 i_rsvd_1:3;
- u64 i_w_num:4;
- u64 i_iosp:1;
- u64 i_rsvd:51;
- } ii_itte3_fld_s;
-} ii_itte3_u_t;
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a SHub Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space         *
- * addressable by the SHub is thus the lower 16 GBytes per widget      *
- * (M-mode), only 7/32nds of this space can be accessed.               *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
- * Crosstalk space addressable by the SHub is thus the lower           *
- * 8 GBytes per widget (N-mode), only 7/32nds of this space can be     *
- * accessed.                                                           *
- * *
- ************************************************************************/
-
-typedef union ii_itte4_u {
- u64 ii_itte4_regval;
- struct {
- u64 i_offset:5;
- u64 i_rsvd_1:3;
- u64 i_w_num:4;
- u64 i_iosp:1;
- u64 i_rsvd:51;
- } ii_itte4_fld_s;
-} ii_itte4_u_t;
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a SHub Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space         *
- * addressable by the SHub is thus the lower 16 GBytes per widget      *
- * (M-mode), only 7/32nds of this space can be accessed.               *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
- * Crosstalk space addressable by the SHub is thus the lower           *
- * 8 GBytes per widget (N-mode), only 7/32nds of this space can be     *
- * accessed.                                                           *
- * *
- ************************************************************************/
-
-typedef union ii_itte5_u {
- u64 ii_itte5_regval;
- struct {
- u64 i_offset:5;
- u64 i_rsvd_1:3;
- u64 i_w_num:4;
- u64 i_iosp:1;
- u64 i_rsvd:51;
- } ii_itte5_fld_s;
-} ii_itte5_u_t;
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Shub Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space         *
- * addressable by the SHub is thus the lower 16 GBytes per widget      *
- * (M-mode), only 7/32nds of this space can be accessed.               *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
- * Crosstalk space addressable by the SHub is thus the lower           *
- * 8 GBytes per widget (N-mode), only 7/32nds of this space can be     *
- * accessed.                                                           *
- * *
- ************************************************************************/
-
-typedef union ii_itte6_u {
- u64 ii_itte6_regval;
- struct {
- u64 i_offset:5;
- u64 i_rsvd_1:3;
- u64 i_w_num:4;
- u64 i_iosp:1;
- u64 i_rsvd:51;
- } ii_itte6_fld_s;
-} ii_itte6_u_t;
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Shub Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space *
- * addressable by the Shub is thus the lower 16 GBytes per widget *
- * (M-mode), only 7/32nds of this space can be accessed.              *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and remainder of the Crosstalk *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum *
- * Crosstalk space addressable by the Shub is thus the lower          *
- * 8 GBytes per widget (N-mode), only 7/32nds of this space can be    *
- * accessed.                                                          *
- * *
- ************************************************************************/
-
-typedef union ii_itte7_u {
- u64 ii_itte7_regval;
- struct {
- u64 i_offset:5;
- u64 i_rsvd_1:3;
- u64 i_w_num:4;
- u64 i_iosp:1;
- u64 i_rsvd:51;
- } ii_itte7_fld_s;
-} ii_itte7_u_t;
-
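-/*
- * Illustrative sketch (not part of the original header): how a Big
- * Window ITTE synthesizes a Crosstalk address in M-mode, per the
- * description above.  OFFSET supplies Crosstalk[33:29], SysAD[28:0]
- * supplies Crosstalk[28:0], and Crosstalk[47:34] are zero.  All seven
- * ITTE registers share this layout; ii_itte7_u_t stands in for any.
- */
-static inline u64 ii_itte_m_mode_xtalk_addr(u64 itte_regval, u64 sysad)
-{
-	ii_itte7_u_t itte;
-
-	itte.ii_itte7_regval = itte_regval;
-	return (itte.ii_itte7_fld_s.i_offset << 29) |
-	       (sysad & ((1ULL << 29) - 1));
-}
-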
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of SHub and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register.                                      *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * *
- ************************************************************************/
-
-typedef union ii_iprb0_u {
- u64 ii_iprb0_regval;
- struct {
- u64 i_c:8;
- u64 i_na:14;
- u64 i_rsvd_2:2;
- u64 i_nb:14;
- u64 i_rsvd_1:2;
- u64 i_m:2;
- u64 i_f:1;
- u64 i_of_cnt:5;
- u64 i_error:1;
- u64 i_rd_to:1;
- u64 i_spur_wr:1;
- u64 i_spur_rd:1;
- u64 i_rsvd:11;
- u64 i_mult_err:1;
- } ii_iprb0_fld_s;
-} ii_iprb0_u_t;
-
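-/*
- * Illustrative sketch (not part of the original header): per the
- * description above, SPUR_WR is cleared only by rewriting the IPRB
- * register, which corrects the C field and recaptures it internally.
- * The volatile pointer is a caller-supplied mapping of IPRB0, an
- * assumption of this sketch.
- */
-static inline void ii_iprb0_rewrite_credits(volatile u64 *iprb0, u64 credits)
-{
-	ii_iprb0_u_t prb;
-
-	prb.ii_iprb0_regval = *iprb0;
-	prb.ii_iprb0_fld_s.i_c = credits;	/* 8-bit credit count */
-	*iprb0 = prb.ii_iprb0_regval;		/* the write clears SPUR_WR */
-}
-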
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of SHub and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register.                                      *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * *
- ************************************************************************/
-
-typedef union ii_iprb8_u {
- u64 ii_iprb8_regval;
- struct {
- u64 i_c:8;
- u64 i_na:14;
- u64 i_rsvd_2:2;
- u64 i_nb:14;
- u64 i_rsvd_1:2;
- u64 i_m:2;
- u64 i_f:1;
- u64 i_of_cnt:5;
- u64 i_error:1;
- u64 i_rd_to:1;
- u64 i_spur_wr:1;
- u64 i_spur_rd:1;
- u64 i_rsvd:11;
- u64 i_mult_err:1;
- } ii_iprb8_fld_s;
-} ii_iprb8_u_t;
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of SHub and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register.                                      *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * *
- ************************************************************************/
-
-typedef union ii_iprb9_u {
- u64 ii_iprb9_regval;
- struct {
- u64 i_c:8;
- u64 i_na:14;
- u64 i_rsvd_2:2;
- u64 i_nb:14;
- u64 i_rsvd_1:2;
- u64 i_m:2;
- u64 i_f:1;
- u64 i_of_cnt:5;
- u64 i_error:1;
- u64 i_rd_to:1;
- u64 i_spur_wr:1;
- u64 i_spur_rd:1;
- u64 i_rsvd:11;
- u64 i_mult_err:1;
- } ii_iprb9_fld_s;
-} ii_iprb9_u_t;
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of SHub and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register.                                      *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * *
- * *
- ************************************************************************/
-
-typedef union ii_iprba_u {
- u64 ii_iprba_regval;
- struct {
- u64 i_c:8;
- u64 i_na:14;
- u64 i_rsvd_2:2;
- u64 i_nb:14;
- u64 i_rsvd_1:2;
- u64 i_m:2;
- u64 i_f:1;
- u64 i_of_cnt:5;
- u64 i_error:1;
- u64 i_rd_to:1;
- u64 i_spur_wr:1;
- u64 i_spur_rd:1;
- u64 i_rsvd:11;
- u64 i_mult_err:1;
- } ii_iprba_fld_s;
-} ii_iprba_u_t;
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of SHub and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register.                                      *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * *
- ************************************************************************/
-
-typedef union ii_iprbb_u {
- u64 ii_iprbb_regval;
- struct {
- u64 i_c:8;
- u64 i_na:14;
- u64 i_rsvd_2:2;
- u64 i_nb:14;
- u64 i_rsvd_1:2;
- u64 i_m:2;
- u64 i_f:1;
- u64 i_of_cnt:5;
- u64 i_error:1;
- u64 i_rd_to:1;
- u64 i_spur_wr:1;
- u64 i_spur_rd:1;
- u64 i_rsvd:11;
- u64 i_mult_err:1;
- } ii_iprbb_fld_s;
-} ii_iprbb_u_t;
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of SHub and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register.                                      *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * *
- ************************************************************************/
-
-typedef union ii_iprbc_u {
- u64 ii_iprbc_regval;
- struct {
- u64 i_c:8;
- u64 i_na:14;
- u64 i_rsvd_2:2;
- u64 i_nb:14;
- u64 i_rsvd_1:2;
- u64 i_m:2;
- u64 i_f:1;
- u64 i_of_cnt:5;
- u64 i_error:1;
- u64 i_rd_to:1;
- u64 i_spur_wr:1;
- u64 i_spur_rd:1;
- u64 i_rsvd:11;
- u64 i_mult_err:1;
- } ii_iprbc_fld_s;
-} ii_iprbc_u_t;
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of SHub and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register.                                      *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * *
- ************************************************************************/
-
-typedef union ii_iprbd_u {
- u64 ii_iprbd_regval;
- struct {
- u64 i_c:8;
- u64 i_na:14;
- u64 i_rsvd_2:2;
- u64 i_nb:14;
- u64 i_rsvd_1:2;
- u64 i_m:2;
- u64 i_f:1;
- u64 i_of_cnt:5;
- u64 i_error:1;
- u64 i_rd_to:1;
- u64 i_spur_wr:1;
- u64 i_spur_rd:1;
- u64 i_rsvd:11;
- u64 i_mult_err:1;
- } ii_iprbd_fld_s;
-} ii_iprbd_u_t;
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of SHub and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register.                                      *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * *
- ************************************************************************/
-
-typedef union ii_iprbe_u {
- u64 ii_iprbe_regval;
- struct {
- u64 i_c:8;
- u64 i_na:14;
- u64 i_rsvd_2:2;
- u64 i_nb:14;
- u64 i_rsvd_1:2;
- u64 i_m:2;
- u64 i_f:1;
- u64 i_of_cnt:5;
- u64 i_error:1;
- u64 i_rd_to:1;
- u64 i_spur_wr:1;
- u64 i_spur_rd:1;
- u64 i_rsvd:11;
- u64 i_mult_err:1;
- } ii_iprbe_fld_s;
-} ii_iprbe_u_t;
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of Shub and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register.                                      *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * *
- ************************************************************************/
-
-typedef union ii_iprbf_u {
- u64 ii_iprbf_regval;
- struct {
- u64 i_c:8;
- u64 i_na:14;
- u64 i_rsvd_2:2;
- u64 i_nb:14;
- u64 i_rsvd_1:2;
- u64 i_m:2;
- u64 i_f:1;
- u64 i_of_cnt:5;
- u64 i_error:1;
- u64 i_rd_to:1;
- u64 i_spur_wr:1;
- u64 i_spur_rd:1;
- u64 i_rsvd:11;
- u64 i_mult_err:1;
-	} ii_iprbf_fld_s;
-} ii_iprbf_u_t;
-
-/************************************************************************
- * *
- * This register specifies the timeout value to use for monitoring *
- * Crosstalk credits which are used outbound to Crosstalk. An *
- * internal counter called the Crosstalk Credit Timeout Counter *
- * increments every 128 II clocks. The counter starts counting *
- * anytime the credit count drops below a threshold, and resets to *
- * zero (stops counting) anytime the credit count is at or above the *
- * threshold. The threshold is 1 credit in direct connect mode and 2 *
- * in Crossbow connect mode. When the internal Crosstalk Credit *
- * Timeout Counter reaches the value programmed in this register, a *
- * Crosstalk Credit Timeout has occurred. The internal counter is not *
- * readable from software, and stops counting at its maximum value, *
- * so it cannot cause more than one interrupt. *
- * *
- ************************************************************************/
-
-typedef union ii_ixcc_u {
- u64 ii_ixcc_regval;
- struct {
- u64 i_time_out:26;
- u64 i_rsvd:38;
- } ii_ixcc_fld_s;
-} ii_ixcc_u_t;
-
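-/*
- * Illustrative sketch (not part of the original header): composing an
- * IXCC value.  The internal counter ticks once per 128 II clocks, so
- * the programmed threshold corresponds to time_out * 128 II-clock
- * periods of sustained credit starvation.
- */
-static inline u64 ii_ixcc_make(u64 time_out)
-{
-	ii_ixcc_u_t ixcc;
-
-	ixcc.ii_ixcc_regval = 0;
-	ixcc.ii_ixcc_fld_s.i_time_out = time_out;	/* 26-bit threshold */
-	return ixcc.ii_ixcc_regval;
-}
-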
-/************************************************************************
- * *
- * Description: This register qualifies all the PIO and DMA *
- * operations launched from widget 0 towards the SHub. In *
- * addition, it also qualifies accesses by the BTE streams. *
- * The bits in each field of this register are cleared by the SHub *
- * upon detection of an error which requires widget 0 or the BTE *
- * streams to be terminated. Whether or not widget x has access *
- * rights to this SHub is determined by an AND of the device *
- * enable bit in the appropriate field of this register and bit 0 in *
- * the Wx_IAC field. The bits in this field are set by writing a 1 to *
- * them. Incoming replies from Crosstalk are not subject to this *
- * access control mechanism. *
- * *
- ************************************************************************/
-
-typedef union ii_imem_u {
- u64 ii_imem_regval;
- struct {
- u64 i_w0_esd:1;
- u64 i_rsvd_3:3;
- u64 i_b0_esd:1;
- u64 i_rsvd_2:3;
- u64 i_b1_esd:1;
- u64 i_rsvd_1:3;
- u64 i_clr_precise:1;
- u64 i_rsvd:51;
- } ii_imem_fld_s;
-} ii_imem_u_t;
-
-/************************************************************************
- * *
- * Description: This register specifies the timeout value to use for *
- * monitoring Crosstalk tail flits coming into the Shub in the *
- * TAIL_TO field. An internal counter associated with this register *
- * is incremented every 128 II internal clocks (7 bits). The counter *
- * starts counting anytime a header micropacket is received and stops *
- * counting (and resets to zero) any time a micropacket with a Tail *
- * bit is received. Once the counter reaches the threshold value *
- * programmed in this register, it generates an interrupt to the *
- * processor that is programmed into the IIDSR. The counter saturates *
- * (does not roll over) at its maximum value, so it cannot cause *
- * another interrupt until after it is cleared. *
- * The register also contains the Read Response Timeout values. The *
- * Prescalar is 23 bits, and counts II clocks. An internal counter *
- * increments on every II clock and when it reaches the value in the *
- * Prescalar field, all IPRTE registers with their valid bits set *
- * have their Read Response timers bumped. Whenever any of them match *
- * the value in the RRSP_TO field, a Read Response Timeout has *
- * occurred, and error handling occurs as described in the Error *
- * Handling section of this document. *
- * *
- ************************************************************************/
-
-typedef union ii_ixtt_u {
- u64 ii_ixtt_regval;
- struct {
- u64 i_tail_to:26;
- u64 i_rsvd_1:6;
- u64 i_rrsp_ps:23;
- u64 i_rrsp_to:5;
- u64 i_rsvd:4;
- } ii_ixtt_fld_s;
-} ii_ixtt_u_t;
-
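-/*
- * Illustrative sketch (not part of the original header): composing an
- * IXTT value.  TAIL_TO guards incoming tail flits, RRSP_PS is the II
- * clock prescalar that bumps the read-response timers of all valid
- * IPRTE entries, and RRSP_TO is the timer value that declares a Read
- * Response Timeout.
- */
-static inline u64 ii_ixtt_make(u64 tail_to, u64 rrsp_ps, u64 rrsp_to)
-{
-	ii_ixtt_u_t ixtt;
-
-	ixtt.ii_ixtt_regval = 0;
-	ixtt.ii_ixtt_fld_s.i_tail_to = tail_to;
-	ixtt.ii_ixtt_fld_s.i_rrsp_ps = rrsp_ps;
-	ixtt.ii_ixtt_fld_s.i_rrsp_to = rrsp_to;
-	return ixtt.ii_ixtt_regval;
-}
-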
-/************************************************************************
- * *
- * Writing a 1 to the fields of this register clears the appropriate *
- * error bits in other areas of SHub. Note that when the *
- * E_PRB_x bits are used to clear error bits in PRB registers, *
- * SPUR_RD and SPUR_WR may persist, because they require additional *
- * action to clear them. See the IPRBx and IXSS Register *
- * specifications. *
- * *
- ************************************************************************/
-
-typedef union ii_ieclr_u {
- u64 ii_ieclr_regval;
- struct {
- u64 i_e_prb_0:1;
- u64 i_rsvd:7;
- u64 i_e_prb_8:1;
- u64 i_e_prb_9:1;
- u64 i_e_prb_a:1;
- u64 i_e_prb_b:1;
- u64 i_e_prb_c:1;
- u64 i_e_prb_d:1;
- u64 i_e_prb_e:1;
- u64 i_e_prb_f:1;
- u64 i_e_crazy:1;
- u64 i_e_bte_0:1;
- u64 i_e_bte_1:1;
- u64 i_reserved_1:10;
- u64 i_spur_rd_hdr:1;
- u64 i_cam_intr_to:1;
- u64 i_cam_overflow:1;
- u64 i_cam_read_miss:1;
- u64 i_ioq_rep_underflow:1;
- u64 i_ioq_req_underflow:1;
- u64 i_ioq_rep_overflow:1;
- u64 i_ioq_req_overflow:1;
- u64 i_iiq_rep_overflow:1;
- u64 i_iiq_req_overflow:1;
- u64 i_ii_xn_rep_cred_overflow:1;
- u64 i_ii_xn_req_cred_overflow:1;
- u64 i_ii_xn_invalid_cmd:1;
- u64 i_xn_ii_invalid_cmd:1;
- u64 i_reserved_2:21;
- } ii_ieclr_fld_s;
-} ii_ieclr_u_t;
-
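-/*
- * Illustrative sketch (not part of the original header): IECLR fields
- * are written with 1 to clear the corresponding error state, so
- * clearing the widget-8 PRB error means writing a value with only
- * E_PRB_8 set.
- */
-static inline u64 ii_ieclr_e_prb_8(void)
-{
-	ii_ieclr_u_t ieclr;
-
-	ieclr.ii_ieclr_regval = 0;
-	ieclr.ii_ieclr_fld_s.i_e_prb_8 = 1;
-	return ieclr.ii_ieclr_regval;
-}
-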
-/************************************************************************
- * *
- * This register controls both BTEs. SOFT_RESET is intended for *
- * recovery after an error. COUNT controls the total number of CRBs *
- * that both BTEs (combined) can use, which affects total BTE *
- * bandwidth. *
- * *
- ************************************************************************/
-
-typedef union ii_ibcr_u {
- u64 ii_ibcr_regval;
- struct {
- u64 i_count:4;
- u64 i_rsvd_1:4;
- u64 i_soft_reset:1;
- u64 i_rsvd:55;
- } ii_ibcr_fld_s;
-} ii_ibcr_u_t;
-
-/************************************************************************
- * *
- * This register contains the header of a spurious read response *
- * received from Crosstalk. A spurious read response is defined as a *
- * read response received by II from a widget for which (1) the SIDN *
- * has a value between 1 and 7, inclusive (II never sends requests to *
- * these widgets), (2) there is no valid IPRTE register which         *
- * corresponds to the TNUM, or (3) the widget indicated in SIDN is *
- * not the same as the widget recorded in the IPRTE register *
- * referenced by the TNUM. If this condition is true, and if the *
- * IXSS[VALID] bit is clear, then the header of the spurious read *
- * response is captured in IXSM and IXSS, and IXSS[VALID] is set. The *
- * errant header is thereby captured, and no further spurious read *
- * responses are captured until IXSS[VALID] is cleared by setting the *
- * appropriate bit in IECLR. Every time a spurious read response is   *
- * detected, the SPUR_RD bit of the PRB corresponding to the incoming *
- * message's SIDN field is set. This always happens, regardless of    *
- * whether a header is captured. The programmer should check *
- * IXSM[SIDN] to determine which widget sent the spurious response, *
- * because there may be more than one SPUR_RD bit set in the PRB *
- * registers. The widget indicated by IXSM[SIDN] was the first *
- * spurious read response to be received since the last time *
- * IXSS[VALID] was clear. The SPUR_RD bit of the corresponding PRB *
- * will be set. Any SPUR_RD bits in any other PRB registers indicate *
- * spurious messages from other widgets which were detected after the *
- * header was captured.                                               *
- * *
- ************************************************************************/
-
-typedef union ii_ixsm_u {
- u64 ii_ixsm_regval;
- struct {
- u64 i_byte_en:32;
- u64 i_reserved:1;
- u64 i_tag:3;
- u64 i_alt_pactyp:4;
- u64 i_bo:1;
- u64 i_error:1;
- u64 i_vbpm:1;
- u64 i_gbr:1;
- u64 i_ds:2;
- u64 i_ct:1;
- u64 i_tnum:5;
- u64 i_pactyp:4;
- u64 i_sidn:4;
- u64 i_didn:4;
- } ii_ixsm_fld_s;
-} ii_ixsm_u_t;
-
-/************************************************************************
- * *
- * This register contains the sideband bits of a spurious read *
- * response received from Crosstalk. *
- * *
- ************************************************************************/
-
-typedef union ii_ixss_u {
- u64 ii_ixss_regval;
- struct {
- u64 i_sideband:8;
- u64 i_rsvd:55;
- u64 i_valid:1;
- } ii_ixss_fld_s;
-} ii_ixss_u_t;
-
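-/*
- * Illustrative sketch (not part of the original header): the spurious
- * read response flow described above.  When IXSS[VALID] is set, the
- * offending widget is identified by IXSM[SIDN]; software then clears
- * VALID through the appropriate IECLR bit so the next errant header
- * can be captured.
- */
-static inline int ii_spurious_rd_widget(u64 ixss_val, u64 ixsm_val)
-{
-	ii_ixss_u_t ixss;
-	ii_ixsm_u_t ixsm;
-
-	ixss.ii_ixss_regval = ixss_val;
-	if (!ixss.ii_ixss_fld_s.i_valid)
-		return -1;			/* no header captured */
-	ixsm.ii_ixsm_regval = ixsm_val;
-	return (int)ixsm.ii_ixsm_fld_s.i_sidn;	/* source widget number */
-}
-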
-/************************************************************************
- * *
- * This register enables software to access the II LLP's test port. *
- * Refer to the LLP 2.5 documentation for an explanation of the test *
- * port. Software can write to this register to program the values *
- * for the control fields (TestErrCapture, TestClear, TestFlit, *
- * TestMask and TestSeed). Similarly, software can read from this *
- * register to obtain the values of the test port's status outputs *
- * (TestCBerr, TestValid and TestData). *
- * *
- ************************************************************************/
-
-typedef union ii_ilct_u {
- u64 ii_ilct_regval;
- struct {
- u64 i_test_seed:20;
- u64 i_test_mask:8;
- u64 i_test_data:20;
- u64 i_test_valid:1;
- u64 i_test_cberr:1;
- u64 i_test_flit:3;
- u64 i_test_clear:1;
- u64 i_test_err_capture:1;
- u64 i_rsvd:9;
- } ii_ilct_fld_s;
-} ii_ilct_u_t;
-
-/************************************************************************
- * *
- * If the II detects an illegal incoming Duplonet packet (request or *
- * reply) when VALID==0 in the IIEPH1 register, then it saves the *
- * contents of the packet's header flit in the IIEPH1 and IIEPH2 *
- * registers, sets the VALID bit in IIEPH1, clears the OVERRUN bit, *
- * and assigns a value to the ERR_TYPE field which indicates the *
- * specific nature of the error. The II recognizes four different *
- * types of errors: short request packets (ERR_TYPE==2), short reply *
- * packets (ERR_TYPE==3), long request packets (ERR_TYPE==4) and long *
- * reply packets (ERR_TYPE==5). The encodings for these types of *
- * errors were chosen to be consistent with the same types of errors *
- * indicated by the ERR_TYPE field in the LB_ERROR_HDR1 register (in *
- * the LB unit). If the II detects an illegal incoming Duplonet *
- * packet when VALID==1 in the IIEPH1 register, then it merely sets *
- * the OVERRUN bit to indicate that a subsequent error has happened, *
- * and does nothing further. *
- * *
- ************************************************************************/
-
-typedef union ii_iieph1_u {
- u64 ii_iieph1_regval;
- struct {
- u64 i_command:7;
- u64 i_rsvd_5:1;
- u64 i_suppl:14;
- u64 i_rsvd_4:1;
- u64 i_source:14;
- u64 i_rsvd_3:1;
- u64 i_err_type:4;
- u64 i_rsvd_2:4;
- u64 i_overrun:1;
- u64 i_rsvd_1:3;
- u64 i_valid:1;
- u64 i_rsvd:13;
- } ii_iieph1_fld_s;
-} ii_iieph1_u_t;
-
-/************************************************************************
- * *
- * This register holds the Address field from the header flit of an *
- * incoming erroneous Duplonet packet, along with the tail bit which *
- * accompanied this header flit. This register is essentially an *
- * extension of IIEPH1. Two registers were necessary because the 64 *
- * bits available in a single register were insufficient to           *
- * capture the entire header flit of an erroneous packet. *
- * *
- ************************************************************************/
-
-typedef union ii_iieph2_u {
- u64 ii_iieph2_regval;
- struct {
- u64 i_rsvd_0:3;
- u64 i_address:47;
- u64 i_rsvd_1:10;
- u64 i_tail:1;
- u64 i_rsvd:3;
- } ii_iieph2_fld_s;
-} ii_iieph2_u_t;
-
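-/*
- * Illustrative sketch (not part of the original header): decoding the
- * ERR_TYPE captured in IIEPH1 for an illegal Duplonet packet, using
- * the encodings listed above.
- */
-static inline const char *ii_iieph1_err_name(u64 iieph1_val)
-{
-	ii_iieph1_u_t eph1;
-
-	eph1.ii_iieph1_regval = iieph1_val;
-	if (!eph1.ii_iieph1_fld_s.i_valid)
-		return "none";
-	switch (eph1.ii_iieph1_fld_s.i_err_type) {
-	case 2:	return "short request";
-	case 3:	return "short reply";
-	case 4:	return "long request";
-	case 5:	return "long reply";
-	default: return "unknown";
-	}
-}
-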
-/******************************/
-
-/************************************************************************
- * *
- * This register's value is a bit vector that guards access from SXBs *
- * to local registers within the II as well as to external Crosstalk *
- * widgets *
- * *
- ************************************************************************/
-
-typedef union ii_islapr_u {
- u64 ii_islapr_regval;
- struct {
- u64 i_region:64;
- } ii_islapr_fld_s;
-} ii_islapr_u_t;
-
-/************************************************************************
- * *
- * A write to this register of the 56-bit value "Pup+Bun" will cause *
- * the bit in the ISLAPR register corresponding to the region of the *
- * requestor to be set (access allowed).                              *
- * *
- ************************************************************************/
-
-typedef union ii_islapo_u {
- u64 ii_islapo_regval;
- struct {
- u64 i_io_sbx_ovrride:56;
- u64 i_rsvd:8;
- } ii_islapo_fld_s;
-} ii_islapo_u_t;
-
-/************************************************************************
- * *
- * Determines how long the wrapper will wait after an interrupt is    *
- * initially issued from the II before it times out the outstanding *
- * interrupt and drops it from the interrupt queue. *
- * *
- ************************************************************************/
-
-typedef union ii_iwi_u {
- u64 ii_iwi_regval;
- struct {
- u64 i_prescale:24;
- u64 i_rsvd:8;
- u64 i_timeout:8;
- u64 i_rsvd1:8;
- u64 i_intrpt_retry_period:8;
- u64 i_rsvd2:8;
- } ii_iwi_fld_s;
-} ii_iwi_u_t;
-
-/************************************************************************
- * *
- * Log errors which have occurred in the II wrapper. The errors are *
- * cleared by writing to the IECLR register. *
- * *
- ************************************************************************/
-
-typedef union ii_iwel_u {
- u64 ii_iwel_regval;
- struct {
- u64 i_intr_timed_out:1;
- u64 i_rsvd:7;
- u64 i_cam_overflow:1;
- u64 i_cam_read_miss:1;
- u64 i_rsvd1:2;
- u64 i_ioq_rep_underflow:1;
- u64 i_ioq_req_underflow:1;
- u64 i_ioq_rep_overflow:1;
- u64 i_ioq_req_overflow:1;
- u64 i_iiq_rep_overflow:1;
- u64 i_iiq_req_overflow:1;
- u64 i_rsvd2:6;
- u64 i_ii_xn_rep_cred_over_under:1;
- u64 i_ii_xn_req_cred_over_under:1;
- u64 i_rsvd3:6;
- u64 i_ii_xn_invalid_cmd:1;
- u64 i_xn_ii_invalid_cmd:1;
- u64 i_rsvd4:30;
- } ii_iwel_fld_s;
-} ii_iwel_u_t;
-
-/************************************************************************
- * *
- * Controls the II wrapper. *
- * *
- ************************************************************************/
-
-typedef union ii_iwc_u {
- u64 ii_iwc_regval;
- struct {
- u64 i_dma_byte_swap:1;
- u64 i_rsvd:3;
- u64 i_cam_read_lines_reset:1;
- u64 i_rsvd1:3;
- u64 i_ii_xn_cred_over_under_log:1;
- u64 i_rsvd2:19;
- u64 i_xn_rep_iq_depth:5;
- u64 i_rsvd3:3;
- u64 i_xn_req_iq_depth:5;
- u64 i_rsvd4:3;
- u64 i_iiq_depth:6;
- u64 i_rsvd5:12;
- u64 i_force_rep_cred:1;
- u64 i_force_req_cred:1;
- } ii_iwc_fld_s;
-} ii_iwc_u_t;
-
-/************************************************************************
- * *
- * Status in the II wrapper. *
- * *
- ************************************************************************/
-
-typedef union ii_iws_u {
- u64 ii_iws_regval;
- struct {
- u64 i_xn_rep_iq_credits:5;
- u64 i_rsvd:3;
- u64 i_xn_req_iq_credits:5;
- u64 i_rsvd1:51;
- } ii_iws_fld_s;
-} ii_iws_u_t;
-
-/************************************************************************
- * *
- * Masks errors in the IWEL register. *
- * *
- ************************************************************************/
-
-typedef union ii_iweim_u {
- u64 ii_iweim_regval;
- struct {
- u64 i_intr_timed_out:1;
- u64 i_rsvd:7;
- u64 i_cam_overflow:1;
- u64 i_cam_read_miss:1;
- u64 i_rsvd1:2;
- u64 i_ioq_rep_underflow:1;
- u64 i_ioq_req_underflow:1;
- u64 i_ioq_rep_overflow:1;
- u64 i_ioq_req_overflow:1;
- u64 i_iiq_rep_overflow:1;
- u64 i_iiq_req_overflow:1;
- u64 i_rsvd2:6;
- u64 i_ii_xn_rep_cred_overflow:1;
- u64 i_ii_xn_req_cred_overflow:1;
- u64 i_rsvd3:6;
- u64 i_ii_xn_invalid_cmd:1;
- u64 i_xn_ii_invalid_cmd:1;
- u64 i_rsvd4:30;
- } ii_iweim_fld_s;
-} ii_iweim_u_t;
-
-/************************************************************************
- * *
- * A write to this register causes a particular field in the *
- * corresponding widget's PRB entry to be adjusted up or down by 1. *
- * This counter should be used when recovering from error and reset *
- * conditions. Note that software would be capable of causing *
- * inadvertent overflow or underflow of these counters. *
- * *
- ************************************************************************/
-
-typedef union ii_ipca_u {
- u64 ii_ipca_regval;
- struct {
- u64 i_wid:4;
- u64 i_adjust:1;
- u64 i_rsvd_1:3;
- u64 i_field:2;
- u64 i_rsvd:54;
- } ii_ipca_fld_s;
-} ii_ipca_u_t;
-
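-/*
- * Illustrative sketch (not part of the original header): composing an
- * IPCA write that adjusts one PRB counter field of widget wid by one
- * during error/reset recovery.  The polarity of the ADJUST bit is an
- * assumption of this sketch.
- */
-static inline u64 ii_ipca_make(u64 wid, int up, u64 field)
-{
-	ii_ipca_u_t ipca;
-
-	ipca.ii_ipca_regval = 0;
-	ipca.ii_ipca_fld_s.i_wid = wid;
-	ipca.ii_ipca_fld_s.i_adjust = up ? 1 : 0;	/* assumed: 1 == up */
-	ipca.ii_ipca_fld_s.i_field = field;
-	return ipca.ii_ipca_regval;
-}
-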
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte0a_u {
- u64 ii_iprte0a_regval;
- struct {
- u64 i_rsvd_1:54;
- u64 i_widget:4;
- u64 i_to_cnt:5;
- u64 i_vld:1;
- } ii_iprte0a_fld_s;
-} ii_iprte0a_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte1a_u {
- u64 ii_iprte1a_regval;
- struct {
- u64 i_rsvd_1:54;
- u64 i_widget:4;
- u64 i_to_cnt:5;
- u64 i_vld:1;
- } ii_iprte1a_fld_s;
-} ii_iprte1a_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte2a_u {
- u64 ii_iprte2a_regval;
- struct {
- u64 i_rsvd_1:54;
- u64 i_widget:4;
- u64 i_to_cnt:5;
- u64 i_vld:1;
- } ii_iprte2a_fld_s;
-} ii_iprte2a_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte3a_u {
- u64 ii_iprte3a_regval;
- struct {
- u64 i_rsvd_1:54;
- u64 i_widget:4;
- u64 i_to_cnt:5;
- u64 i_vld:1;
- } ii_iprte3a_fld_s;
-} ii_iprte3a_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte4a_u {
- u64 ii_iprte4a_regval;
- struct {
- u64 i_rsvd_1:54;
- u64 i_widget:4;
- u64 i_to_cnt:5;
- u64 i_vld:1;
- } ii_iprte4a_fld_s;
-} ii_iprte4a_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte5a_u {
- u64 ii_iprte5a_regval;
- struct {
- u64 i_rsvd_1:54;
- u64 i_widget:4;
- u64 i_to_cnt:5;
- u64 i_vld:1;
- } ii_iprte5a_fld_s;
-} ii_iprte5a_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte6a_u {
- u64 ii_iprte6a_regval;
- struct {
- u64 i_rsvd_1:54;
- u64 i_widget:4;
- u64 i_to_cnt:5;
- u64 i_vld:1;
- } ii_iprte6a_fld_s;
-} ii_iprte6a_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte7a_u {
- u64 ii_iprte7a_regval;
- struct {
- u64 i_rsvd_1:54;
- u64 i_widget:4;
- u64 i_to_cnt:5;
- u64 i_vld:1;
-	} ii_iprte7a_fld_s;
-} ii_iprte7a_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte0b_u {
- u64 ii_iprte0b_regval;
- struct {
- u64 i_rsvd_1:3;
- u64 i_address:47;
- u64 i_init:3;
- u64 i_source:11;
- } ii_iprte0b_fld_s;
-} ii_iprte0b_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte1b_u {
- u64 ii_iprte1b_regval;
- struct {
- u64 i_rsvd_1:3;
- u64 i_address:47;
- u64 i_init:3;
- u64 i_source:11;
- } ii_iprte1b_fld_s;
-} ii_iprte1b_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte2b_u {
- u64 ii_iprte2b_regval;
- struct {
- u64 i_rsvd_1:3;
- u64 i_address:47;
- u64 i_init:3;
- u64 i_source:11;
- } ii_iprte2b_fld_s;
-} ii_iprte2b_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte3b_u {
- u64 ii_iprte3b_regval;
- struct {
- u64 i_rsvd_1:3;
- u64 i_address:47;
- u64 i_init:3;
- u64 i_source:11;
- } ii_iprte3b_fld_s;
-} ii_iprte3b_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte4b_u {
- u64 ii_iprte4b_regval;
- struct {
- u64 i_rsvd_1:3;
- u64 i_address:47;
- u64 i_init:3;
- u64 i_source:11;
- } ii_iprte4b_fld_s;
-} ii_iprte4b_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte5b_u {
- u64 ii_iprte5b_regval;
- struct {
- u64 i_rsvd_1:3;
- u64 i_address:47;
- u64 i_init:3;
- u64 i_source:11;
- } ii_iprte5b_fld_s;
-} ii_iprte5b_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte6b_u {
- u64 ii_iprte6b_regval;
- struct {
- u64 i_rsvd_1:3;
- u64 i_address:47;
- u64 i_init:3;
- u64 i_source:11;
-
- } ii_iprte6b_fld_s;
-} ii_iprte6b_u_t;
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-typedef union ii_iprte7b_u {
- u64 ii_iprte7b_regval;
- struct {
- u64 i_rsvd_1:3;
- u64 i_address:47;
- u64 i_init:3;
- u64 i_source:11;
- } ii_iprte7b_fld_s;
-} ii_iprte7b_u_t;
-
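-/*
- * Illustrative sketch (not part of the original header): each
- * outstanding PIO read occupies an IPRTE pair; the _A half holds the
- * widget/timeout/valid state and the _B half holds the address and
- * initiator, so reconstructing one pending read means reading both
- * halves.
- */
-static inline int ii_iprte0_pending(u64 a_val, u64 b_val,
-				    u64 *widget, u64 *address)
-{
-	ii_iprte0a_u_t a;
-	ii_iprte0b_u_t b;
-
-	a.ii_iprte0a_regval = a_val;
-	if (!a.ii_iprte0a_fld_s.i_vld)
-		return 0;			/* entry is free */
-	b.ii_iprte0b_regval = b_val;
-	*widget = a.ii_iprte0a_fld_s.i_widget;
-	*address = b.ii_iprte0b_fld_s.i_address;
-	return 1;
-}
-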
-/************************************************************************
- * *
- * Description: SHub II contains a feature which did not exist in     *
- * the Hub: it automatically cleans up after a Read Response          *
- * timeout, including deallocation of the IPRTE and recovery of IBuf  *
- * space. The inclusion of this register in SHub is for backward      *
- * compatibility.                                                     *
- * A write to this register causes an entry from the table of *
- * outstanding PIO Read Requests to be freed and returned to the *
- * stack of free entries. This register is used in handling the *
- * timeout errors that result in a PIO Reply never returning from *
- * Crosstalk. *
- * Note that this register does not affect the contents of the IPRTE *
- * registers. The Valid bits in those registers have to be *
- * specifically turned off by software. *
- * *
- ************************************************************************/
-
-typedef union ii_ipdr_u {
- u64 ii_ipdr_regval;
- struct {
- u64 i_te:3;
- u64 i_rsvd_1:1;
- u64 i_pnd:1;
- u64 i_init_rpcnt:1;
- u64 i_rsvd:58;
- } ii_ipdr_fld_s;
-} ii_ipdr_u_t;
-
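-/*
- * Illustrative sketch (not part of the original header): freeing IPRTE
- * entry te after a read-response timeout.  Per the description above,
- * this returns the entry to the free stack but does not clear the
- * IPRTE Valid bit, which software must drop separately.
- */
-static inline u64 ii_ipdr_free_entry(u64 te)
-{
-	ii_ipdr_u_t ipdr;
-
-	ipdr.ii_ipdr_regval = 0;
-	ipdr.ii_ipdr_fld_s.i_te = te;		/* entry number, 0..7 */
-	return ipdr.ii_ipdr_regval;
-}
-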
-/************************************************************************
- * *
- * A write to this register causes a CRB entry to be returned to the *
- * queue of free CRBs. The entry should have previously been cleared *
- * (mark bit) via backdoor access to the pertinent CRB entry. This *
- * register is used in the last step of handling the errors that are *
- * captured and marked in CRB entries. Briefly: 1) first error for *
- * DMA write from a particular device, and first error for a *
- * particular BTE stream, lead to a marked CRB entry, and processor *
- * interrupt, 2) software reads the error information captured in the *
- * CRB entry, and presumably takes some corrective action, 3) *
- * software clears the mark bit, and finally 4) software writes to *
- * the ICDR register to return the CRB entry to the list of free CRB *
- * entries. *
- * *
- ************************************************************************/
-
-typedef union ii_icdr_u {
- u64 ii_icdr_regval;
- struct {
- u64 i_crb_num:4;
- u64 i_pnd:1;
- u64 i_rsvd:59;
- } ii_icdr_fld_s;
-} ii_icdr_u_t;
-
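-/*
- * Illustrative sketch (not part of the original header): step 4 of the
- * recovery sequence above, returning a previously cleared CRB entry to
- * the free queue by writing its number into ICDR.
- */
-static inline u64 ii_icdr_free_crb(u64 crb_num)
-{
-	ii_icdr_u_t icdr;
-
-	icdr.ii_icdr_regval = 0;
-	icdr.ii_icdr_fld_s.i_crb_num = crb_num;
-	return icdr.ii_icdr_regval;
-}
-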
-/************************************************************************
- * *
- * This register provides debug access to two FIFOs inside of II. *
- * Both IOQ_MAX* fields of this register contain the instantaneous *
- * depth (in units of the number of available entries) of the *
- * associated IOQ FIFO. A read of this register will return the *
- * number of free entries on each FIFO at the time of the read. So *
- * when a FIFO is idle, the associated field contains the maximum *
- * depth of the FIFO. This register is writable for debug reasons *
- * and is intended to be written with the maximum desired FIFO depth *
- * while the FIFO is idle. Software must assure that II is idle when *
- * this register is written. If there are any active entries in any *
- * of these FIFOs when this register is written, the results are *
- * undefined. *
- * *
- ************************************************************************/
-
-typedef union ii_ifdr_u {
- u64 ii_ifdr_regval;
- struct {
- u64 i_ioq_max_rq:7;
- u64 i_set_ioq_rq:1;
- u64 i_ioq_max_rp:7;
- u64 i_set_ioq_rp:1;
- u64 i_rsvd:48;
- } ii_ifdr_fld_s;
-} ii_ifdr_u_t;
-
-/************************************************************************
- * *
- * This register allows the II to become sluggish in removing *
- * messages from its inbound queue (IIQ). This will cause messages to *
- * back up in either virtual channel. Disabling the "molasses" mode *
- * subsequently allows the II to be tested under stress. In the *
- * sluggish ("Molasses") mode, the localized effects of congestion *
- * can be observed. *
- * *
- ************************************************************************/
-
-typedef union ii_iiap_u {
- u64 ii_iiap_regval;
- struct {
- u64 i_rq_mls:6;
- u64 i_rsvd_1:2;
- u64 i_rp_mls:6;
- u64 i_rsvd:50;
- } ii_iiap_fld_s;
-} ii_iiap_u_t;
-
-/************************************************************************
- * *
- * This register allows several parameters of CRB operation to be *
- * set. Note that writing to this register can have catastrophic side *
- * effects, if the CRB is not quiescent, i.e. if the CRB is *
- * processing protocol messages when the write occurs. *
- * *
- ************************************************************************/
-
-typedef union ii_icmr_u {
- u64 ii_icmr_regval;
- struct {
- u64 i_sp_msg:1;
- u64 i_rd_hdr:1;
- u64 i_rsvd_4:2;
- u64 i_c_cnt:4;
- u64 i_rsvd_3:4;
- u64 i_clr_rqpd:1;
- u64 i_clr_rppd:1;
- u64 i_rsvd_2:2;
- u64 i_fc_cnt:4;
- u64 i_crb_vld:15;
- u64 i_crb_mark:15;
- u64 i_rsvd_1:2;
- u64 i_precise:1;
- u64 i_rsvd:11;
- } ii_icmr_fld_s;
-} ii_icmr_u_t;
-
-/************************************************************************
- * *
- * This register allows control of the table portion of the CRB *
- * logic via software. Control operations from this register have *
- * priority over all incoming Crosstalk or BTE requests. *
- * *
- ************************************************************************/
-
-typedef union ii_iccr_u {
- u64 ii_iccr_regval;
- struct {
- u64 i_crb_num:4;
- u64 i_rsvd_1:4;
- u64 i_cmd:8;
- u64 i_pending:1;
- u64 i_rsvd:47;
- } ii_iccr_fld_s;
-} ii_iccr_u_t;
-
-/************************************************************************
- * *
- * This register allows the maximum timeout value to be programmed. *
- * *
- ************************************************************************/
-
-typedef union ii_icto_u {
- u64 ii_icto_regval;
- struct {
- u64 i_timeout:8;
- u64 i_rsvd:56;
- } ii_icto_fld_s;
-} ii_icto_u_t;
-
-/************************************************************************
- * *
- * This register allows the timeout prescalar to be programmed. An *
- * internal counter is associated with this register. When the *
- * internal counter reaches the value of the PRESCALE field, the *
- * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT] *
- * field). The internal counter resets to zero, and then continues *
- * counting. *
- * *
- ************************************************************************/
-
-typedef union ii_ictp_u {
- u64 ii_ictp_regval;
- struct {
- u64 i_prescale:24;
- u64 i_rsvd:40;
- } ii_ictp_fld_s;
-} ii_ictp_u_t;
-
-/************************************************************************
- * *
- * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
- * used for Crosstalk operations (both cacheline and partial *
- * operations) or BTE/IO. Because the CRB entries are very wide, five *
- * registers (_A to _E) are required to read and write each entry. *
- * The CRB Entry registers can be conceptualized as rows and columns *
- * (illustrated in the table above). Each row contains the 5          *
- * registers required for a single CRB Entry. The first doubleword    *
- * (column) for each entry is labeled A, and the second doubleword *
- * (higher address) is labeled B, the third doubleword is labeled C, *
- * the fourth doubleword is labeled D and the fifth doubleword is *
- * labeled E. All CRB entries have their addresses on a quarter *
- * cacheline aligned boundary. *
- * Upon reset, only the following fields are initialized: valid *
- * (VLD), priority count, timeout, timeout valid, and context valid. *
- * All other bits should be cleared by software before use (after *
- * recovering any potential error state from before the reset). *
- * The following five tables summarize the format for the five        *
- * registers that are used for each ICRB# Entry.                      *
- * *
- ************************************************************************/
-
-typedef union ii_icrb0_a_u {
- u64 ii_icrb0_a_regval;
- struct {
- u64 ia_iow:1;
- u64 ia_vld:1;
- u64 ia_addr:47;
- u64 ia_tnum:5;
- u64 ia_sidn:4;
- u64 ia_rsvd:6;
- } ii_icrb0_a_fld_s;
-} ii_icrb0_a_u_t;
-
-/************************************************************************
- * *
- * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
- * used for Crosstalk operations (both cacheline and partial *
- * operations) or BTE/IO. Because the CRB entries are very wide, five *
- * registers (_A to _E) are required to read and write each entry. *
- * *
- ************************************************************************/
-
-typedef union ii_icrb0_b_u {
- u64 ii_icrb0_b_regval;
- struct {
- u64 ib_xt_err:1;
- u64 ib_mark:1;
- u64 ib_ln_uce:1;
- u64 ib_errcode:3;
- u64 ib_error:1;
- u64 ib_stall__bte_1:1;
- u64 ib_stall__bte_0:1;
- u64 ib_stall__intr:1;
- u64 ib_stall_ib:1;
- u64 ib_intvn:1;
- u64 ib_wb:1;
- u64 ib_hold:1;
- u64 ib_ack:1;
- u64 ib_resp:1;
- u64 ib_ack_cnt:11;
- u64 ib_rsvd:7;
- u64 ib_exc:5;
- u64 ib_init:3;
- u64 ib_imsg:8;
- u64 ib_imsgtype:2;
- u64 ib_use_old:1;
- u64 ib_rsvd_1:11;
- } ii_icrb0_b_fld_s;
-} ii_icrb0_b_u_t;
-
-/************************************************************************
- * *
- * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
- * used for Crosstalk operations (both cacheline and partial *
- * operations) or BTE/IO. Because the CRB entries are very wide, five *
- * registers (_A to _E) are required to read and write each entry. *
- * *
- ************************************************************************/
-
-typedef union ii_icrb0_c_u {
- u64 ii_icrb0_c_regval;
- struct {
- u64 ic_source:15;
- u64 ic_size:2;
- u64 ic_ct:1;
- u64 ic_bte_num:1;
- u64 ic_gbr:1;
- u64 ic_resprqd:1;
- u64 ic_bo:1;
- u64 ic_suppl:15;
- u64 ic_rsvd:27;
- } ii_icrb0_c_fld_s;
-} ii_icrb0_c_u_t;
-
-/************************************************************************
- * *
- * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
- * used for Crosstalk operations (both cacheline and partial *
- * operations) or BTE/IO. Because the CRB entries are very wide, five *
- * registers (_A to _E) are required to read and write each entry. *
- * *
- ************************************************************************/
-
-typedef union ii_icrb0_d_u {
- u64 ii_icrb0_d_regval;
- struct {
- u64 id_pa_be:43;
- u64 id_bte_op:1;
- u64 id_pr_psc:4;
- u64 id_pr_cnt:4;
- u64 id_sleep:1;
- u64 id_rsvd:11;
- } ii_icrb0_d_fld_s;
-} ii_icrb0_d_u_t;
-
-/************************************************************************
- * *
- * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
- * used for Crosstalk operations (both cacheline and partial *
- * operations) or BTE/IO. Because the CRB entries are very wide, five *
- * registers (_A to _E) are required to read and write each entry. *
- * *
- ************************************************************************/
-
-typedef union ii_icrb0_e_u {
- u64 ii_icrb0_e_regval;
- struct {
- u64 ie_timeout:8;
- u64 ie_context:15;
- u64 ie_rsvd:1;
- u64 ie_tvld:1;
- u64 ie_cvld:1;
- u64 ie_rsvd_0:38;
- } ii_icrb0_e_fld_s;
-} ii_icrb0_e_u_t;
-
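-/*
- * Illustrative sketch (not part of the original header): during error
- * handling, the CRB entries that need software attention are the valid
- * ones whose _B half has the mark or error state set.
- */
-static inline int ii_icrb0_needs_service(u64 a_val, u64 b_val)
-{
-	ii_icrb0_a_u_t a;
-	ii_icrb0_b_u_t b;
-
-	a.ii_icrb0_a_regval = a_val;
-	b.ii_icrb0_b_regval = b_val;
-	return a.ii_icrb0_a_fld_s.ia_vld &&
-	       (b.ii_icrb0_b_fld_s.ib_mark || b.ii_icrb0_b_fld_s.ib_error);
-}
-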
-/************************************************************************
- * *
- * This register contains the lower 64 bits of the header of the *
- * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
- * register is set. *
- * *
- ************************************************************************/
-
-typedef union ii_icsml_u {
- u64 ii_icsml_regval;
- struct {
- u64 i_tt_addr:47;
- u64 i_newsuppl_ex:14;
- u64 i_reserved:2;
- u64 i_overflow:1;
- } ii_icsml_fld_s;
-} ii_icsml_u_t;
-
-/************************************************************************
- * *
- * This register contains the middle 64 bits of the header of the *
- * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
- * register is set. *
- * *
- ************************************************************************/
-
-typedef union ii_icsmm_u {
- u64 ii_icsmm_regval;
- struct {
- u64 i_tt_ack_cnt:11;
- u64 i_reserved:53;
- } ii_icsmm_fld_s;
-} ii_icsmm_u_t;
-
-/************************************************************************
- * *
- * This register contains the microscopic state, all the inputs to *
- * the protocol table, captured with the spurious message. Valid when *
- * the SP_MSG bit in the ICMR register is set. *
- * *
- ************************************************************************/
-
-typedef union ii_icsmh_u {
- u64 ii_icsmh_regval;
- struct {
- u64 i_tt_vld:1;
- u64 i_xerr:1;
- u64 i_ft_cwact_o:1;
- u64 i_ft_wact_o:1;
- u64 i_ft_active_o:1;
- u64 i_sync:1;
- u64 i_mnusg:1;
- u64 i_mnusz:1;
- u64 i_plusz:1;
- u64 i_plusg:1;
- u64 i_tt_exc:5;
- u64 i_tt_wb:1;
- u64 i_tt_hold:1;
- u64 i_tt_ack:1;
- u64 i_tt_resp:1;
- u64 i_tt_intvn:1;
- u64 i_g_stall_bte1:1;
- u64 i_g_stall_bte0:1;
- u64 i_g_stall_il:1;
- u64 i_g_stall_ib:1;
- u64 i_tt_imsg:8;
- u64 i_tt_imsgtype:2;
- u64 i_tt_use_old:1;
- u64 i_tt_respreqd:1;
- u64 i_tt_bte_num:1;
- u64 i_cbn:1;
- u64 i_match:1;
- u64 i_rpcnt_lt_34:1;
- u64 i_rpcnt_ge_34:1;
- u64 i_rpcnt_lt_18:1;
- u64 i_rpcnt_ge_18:1;
- u64 i_rpcnt_lt_2:1;
- u64 i_rpcnt_ge_2:1;
- u64 i_rqcnt_lt_18:1;
- u64 i_rqcnt_ge_18:1;
- u64 i_rqcnt_lt_2:1;
- u64 i_rqcnt_ge_2:1;
- u64 i_tt_device:7;
- u64 i_tt_init:3;
- u64 i_reserved:5;
- } ii_icsmh_fld_s;
-} ii_icsmh_u_t;
-
-/************************************************************************
- * *
- * The Shub DEBUG unit provides a 3-bit selection signal to the *
- * II core and a 3-bit selection signal to the fsbclk domain in the II *
- * wrapper. *
- * *
- ************************************************************************/
-
-typedef union ii_idbss_u {
- u64 ii_idbss_regval;
- struct {
- u64 i_iioclk_core_submenu:3;
- u64 i_rsvd:5;
- u64 i_fsbclk_wrapper_submenu:3;
- u64 i_rsvd_1:5;
- u64 i_iioclk_menu:5;
- u64 i_rsvd_2:43;
- } ii_idbss_fld_s;
-} ii_idbss_u_t;
-
-/************************************************************************
- * *
- * Description: This register is used to set up the length for a *
- * transfer and then to monitor the progress of that transfer. This *
- * register needs to be initialized before a transfer is started. A *
- * legitimate write to this register will set the Busy bit, clear the *
- * Error bit, and initialize the length to the value desired. *
- * While the transfer is in progress, hardware will decrement the *
- * length field with each successful block that is copied. Once the *
- * transfer completes, hardware will clear the Busy bit. The length *
- * field will also contain the number of cache lines left to be *
- * transferred. *
- * *
- ************************************************************************/
-
-typedef union ii_ibls0_u {
- u64 ii_ibls0_regval;
- struct {
- u64 i_length:16;
- u64 i_error:1;
- u64 i_rsvd_1:3;
- u64 i_busy:1;
- u64 i_rsvd:43;
- } ii_ibls0_fld_s;
-} ii_ibls0_u_t;
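
A minimal usage sketch (not from the original header): poll IBLS0 through
the union above until hardware clears the Busy bit. The MMIO pointer is a
placeholder for however the caller maps the register.

static inline int bte0_wait(volatile u64 *ibls0_mmr)
{
	ii_ibls0_u_t ibls;

	do {
		ibls.ii_ibls0_regval = *ibls0_mmr;	/* read length/status */
	} while (ibls.ii_ibls0_fld_s.i_busy);	/* hw clears Busy when done */

	/* i_length now holds the number of cache lines left to transfer */
	return ibls.ii_ibls0_fld_s.i_error ? -1 : 0;
}
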
-
-/************************************************************************
- * *
- * This register should be loaded before a transfer is started. The *
- * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
- * address as described in Section 1.3, Figure 2 and Figure 3. Since *
- * the bottom 7 bits of the address are always taken to be zero, BTE *
- * transfers are always cacheline-aligned. *
- * *
- ************************************************************************/
-
-typedef union ii_ibsa0_u {
- u64 ii_ibsa0_regval;
- struct {
- u64 i_rsvd_1:7;
- u64 i_addr:42;
- u64 i_rsvd:15;
- } ii_ibsa0_fld_s;
-} ii_ibsa0_u_t;
-
-/************************************************************************
- * *
- * This register should be loaded before a transfer is started. The *
- * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
- * address as described in Section 1.3, Figure 2 and Figure 3. Since *
- * the bottom 7 bits of the address are always taken to be zero, BTE *
- * transfers are always cacheline-aligned. *
- * *
- ************************************************************************/
-
-typedef union ii_ibda0_u {
- u64 ii_ibda0_regval;
- struct {
- u64 i_rsvd_1:7;
- u64 i_addr:42;
- u64 i_rsvd:15;
- } ii_ibda0_fld_s;
-} ii_ibda0_u_t;
-
-/************************************************************************
- * *
- * Writing to this register sets up the attributes of the transfer *
- * and initiates the transfer operation. Reading this register has *
- * the side effect of terminating any transfer in progress. Note: *
- * stopping a transfer midstream could have an adverse impact on the *
- * other BTE. If a BTE stream has to be stopped (due to error *
- * handling for example), both BTE streams should be stopped and *
- * their transfers discarded. *
- * *
- ************************************************************************/
-
-typedef union ii_ibct0_u {
- u64 ii_ibct0_regval;
- struct {
- u64 i_zerofill:1;
- u64 i_rsvd_2:3;
- u64 i_notify:1;
- u64 i_rsvd_1:3;
- u64 i_poison:1;
- u64 i_rsvd:55;
- } ii_ibct0_fld_s;
-} ii_ibct0_u_t;
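
Purely illustrative: composing an IBCT0 value that requests completion
notification, with zero-fill and poison left off. The platform-specific
MMIO store that would actually start the transfer is elided.

static inline u64 bte0_ctrl_value(void)
{
	ii_ibct0_u_t ibct;

	ibct.ii_ibct0_regval = 0;
	ibct.ii_ibct0_fld_s.i_notify = 1;	/* notify on completion */
	ibct.ii_ibct0_fld_s.i_zerofill = 0;	/* copy, don't zero-fill */
	ibct.ii_ibct0_fld_s.i_poison = 0;
	return ibct.ii_ibct0_regval;	/* write to IIO_IBCT0 to start */
}
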
-
-/************************************************************************
- * *
- * This register contains the address to which the WINV is sent. *
- * This address has to be cache line aligned. *
- * *
- ************************************************************************/
-
-typedef union ii_ibna0_u {
- u64 ii_ibna0_regval;
- struct {
- u64 i_rsvd_1:7;
- u64 i_addr:42;
- u64 i_rsvd:15;
- } ii_ibna0_fld_s;
-} ii_ibna0_u_t;
-
-/************************************************************************
- * *
- * This register contains the programmable level as well as the node *
- * ID and PI unit of the processor to which the interrupt will be *
- * sent. *
- * *
- ************************************************************************/
-
-typedef union ii_ibia0_u {
- u64 ii_ibia0_regval;
- struct {
- u64 i_rsvd_2:1;
- u64 i_node_id:11;
- u64 i_rsvd_1:4;
- u64 i_level:7;
- u64 i_rsvd:41;
- } ii_ibia0_fld_s;
-} ii_ibia0_u_t;
-
-/************************************************************************
- * *
- * Description: This register is used to set up the length for a *
- * transfer and then to monitor the progress of that transfer. This *
- * register needs to be initialized before a transfer is started. A *
- * legitimate write to this register will set the Busy bit, clear the *
- * Error bit, and initialize the length to the value desired. *
- * While the transfer is in progress, hardware will decrement the *
- * length field with each successful block that is copied. Once the *
- * transfer completes, hardware will clear the Busy bit. The length *
- * field will also contain the number of cache lines left to be *
- * transferred. *
- * *
- ************************************************************************/
-
-typedef union ii_ibls1_u {
- u64 ii_ibls1_regval;
- struct {
- u64 i_length:16;
- u64 i_error:1;
- u64 i_rsvd_1:3;
- u64 i_busy:1;
- u64 i_rsvd:43;
- } ii_ibls1_fld_s;
-} ii_ibls1_u_t;
-
-/************************************************************************
- * *
- * This register should be loaded before a transfer is started. The *
- * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
- * address as described in Section 1.3, Figure 2 and Figure 3. Since *
- * the bottom 7 bits of the address are always taken to be zero, BTE *
- * transfers are always cacheline-aligned. *
- * *
- ************************************************************************/
-
-typedef union ii_ibsa1_u {
- u64 ii_ibsa1_regval;
- struct {
- u64 i_rsvd_1:7;
- u64 i_addr:33;
- u64 i_rsvd:24;
- } ii_ibsa1_fld_s;
-} ii_ibsa1_u_t;
-
-/************************************************************************
- * *
- * This register should be loaded before a transfer is started. The *
- * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
- * address as described in Section 1.3, Figure 2 and Figure 3. Since *
- * the bottom 7 bits of the address are always taken to be zero, BTE *
- * transfers are always cacheline-aligned. *
- * *
- ************************************************************************/
-
-typedef union ii_ibda1_u {
- u64 ii_ibda1_regval;
- struct {
- u64 i_rsvd_1:7;
- u64 i_addr:33;
- u64 i_rsvd:24;
- } ii_ibda1_fld_s;
-} ii_ibda1_u_t;
-
-/************************************************************************
- * *
- * Writing to this register sets up the attributes of the transfer *
- * and initiates the transfer operation. Reading this register has *
- * the side effect of terminating any transfer in progress. Note: *
- * stopping a transfer midstream could have an adverse impact on the *
- * other BTE. If a BTE stream has to be stopped (due to error *
- * handling for example), both BTE streams should be stopped and *
- * their transfers discarded. *
- * *
- ************************************************************************/
-
-typedef union ii_ibct1_u {
- u64 ii_ibct1_regval;
- struct {
- u64 i_zerofill:1;
- u64 i_rsvd_2:3;
- u64 i_notify:1;
- u64 i_rsvd_1:3;
- u64 i_poison:1;
- u64 i_rsvd:55;
- } ii_ibct1_fld_s;
-} ii_ibct1_u_t;
-
-/************************************************************************
- * *
- * This register contains the address to which the WINV is sent. *
- * This address has to be cache line aligned. *
- * *
- ************************************************************************/
-
-typedef union ii_ibna1_u {
- u64 ii_ibna1_regval;
- struct {
- u64 i_rsvd_1:7;
- u64 i_addr:33;
- u64 i_rsvd:24;
- } ii_ibna1_fld_s;
-} ii_ibna1_u_t;
-
-/************************************************************************
- * *
- * This register contains the programmable level as well as the node *
- * ID and PI unit of the processor to which the interrupt will be *
- * sent. *
- * *
- ************************************************************************/
-
-typedef union ii_ibia1_u {
- u64 ii_ibia1_regval;
- struct {
- u64 i_pi_id:1;
- u64 i_node_id:8;
- u64 i_rsvd_1:7;
- u64 i_level:7;
- u64 i_rsvd:41;
- } ii_ibia1_fld_s;
-} ii_ibia1_u_t;
-
-/************************************************************************
- * *
- * This register defines the resources that feed information into *
- * the two performance counters located in the IO Performance *
- * Profiling Register. There are 17 different quantities that can be *
- * measured. Given these 17 different options, the two performance *
- * counters have 15 of them in common; menu selections 0 through 0xE *
- * are identical for each performance counter. As for the other two *
- * options, one is available from one performance counter and the *
- * other is available from the other performance counter. Hence, the *
- * II supports all 17*16=272 possible combinations of quantities to *
- * measure. *
- * *
- ************************************************************************/
-
-typedef union ii_ipcr_u {
- u64 ii_ipcr_regval;
- struct {
- u64 i_ippr0_c:4;
- u64 i_ippr1_c:4;
- u64 i_icct:8;
- u64 i_rsvd:48;
- } ii_ipcr_fld_s;
-} ii_ipcr_u_t;
-
-/************************************************************************
- * *
- * This is the IO Performance Profiling Register; it holds the two *
- * performance counters whose inputs are selected by the IPCR *
- * register described above. *
- * *
- ************************************************************************/
-
-typedef union ii_ippr_u {
- u64 ii_ippr_regval;
- struct {
- u64 i_ippr0:32;
- u64 i_ippr1:32;
- } ii_ippr_fld_s;
-} ii_ippr_u_t;
-
-/************************************************************************
- * *
- * The following defines, which were not formed into structures, are *
- * probably identical to another register; the name of that register *
- * is given next to each define. This information needs to be *
- * checked carefully. *
- * *
- * IIO_ICRB1_A IIO_ICRB0_A *
- * IIO_ICRB1_B IIO_ICRB0_B *
- * IIO_ICRB1_C IIO_ICRB0_C *
- * IIO_ICRB1_D IIO_ICRB0_D *
- * IIO_ICRB1_E IIO_ICRB0_E *
- * IIO_ICRB2_A IIO_ICRB0_A *
- * IIO_ICRB2_B IIO_ICRB0_B *
- * IIO_ICRB2_C IIO_ICRB0_C *
- * IIO_ICRB2_D IIO_ICRB0_D *
- * IIO_ICRB2_E IIO_ICRB0_E *
- * IIO_ICRB3_A IIO_ICRB0_A *
- * IIO_ICRB3_B IIO_ICRB0_B *
- * IIO_ICRB3_C IIO_ICRB0_C *
- * IIO_ICRB3_D IIO_ICRB0_D *
- * IIO_ICRB3_E IIO_ICRB0_E *
- * IIO_ICRB4_A IIO_ICRB0_A *
- * IIO_ICRB4_B IIO_ICRB0_B *
- * IIO_ICRB4_C IIO_ICRB0_C *
- * IIO_ICRB4_D IIO_ICRB0_D *
- * IIO_ICRB4_E IIO_ICRB0_E *
- * IIO_ICRB5_A IIO_ICRB0_A *
- * IIO_ICRB5_B IIO_ICRB0_B *
- * IIO_ICRB5_C IIO_ICRB0_C *
- * IIO_ICRB5_D IIO_ICRB0_D *
- * IIO_ICRB5_E IIO_ICRB0_E *
- * IIO_ICRB6_A IIO_ICRB0_A *
- * IIO_ICRB6_B IIO_ICRB0_B *
- * IIO_ICRB6_C IIO_ICRB0_C *
- * IIO_ICRB6_D IIO_ICRB0_D *
- * IIO_ICRB6_E IIO_ICRB0_E *
- * IIO_ICRB7_A IIO_ICRB0_A *
- * IIO_ICRB7_B IIO_ICRB0_B *
- * IIO_ICRB7_C IIO_ICRB0_C *
- * IIO_ICRB7_D IIO_ICRB0_D *
- * IIO_ICRB7_E IIO_ICRB0_E *
- * IIO_ICRB8_A IIO_ICRB0_A *
- * IIO_ICRB8_B IIO_ICRB0_B *
- * IIO_ICRB8_C IIO_ICRB0_C *
- * IIO_ICRB8_D IIO_ICRB0_D *
- * IIO_ICRB8_E IIO_ICRB0_E *
- * IIO_ICRB9_A IIO_ICRB0_A *
- * IIO_ICRB9_B IIO_ICRB0_B *
- * IIO_ICRB9_C IIO_ICRB0_C *
- * IIO_ICRB9_D IIO_ICRB0_D *
- * IIO_ICRB9_E IIO_ICRB0_E *
- * IIO_ICRBA_A IIO_ICRB0_A *
- * IIO_ICRBA_B IIO_ICRB0_B *
- * IIO_ICRBA_C IIO_ICRB0_C *
- * IIO_ICRBA_D IIO_ICRB0_D *
- * IIO_ICRBA_E IIO_ICRB0_E *
- * IIO_ICRBB_A IIO_ICRB0_A *
- * IIO_ICRBB_B IIO_ICRB0_B *
- * IIO_ICRBB_C IIO_ICRB0_C *
- * IIO_ICRBB_D IIO_ICRB0_D *
- * IIO_ICRBB_E IIO_ICRB0_E *
- * IIO_ICRBC_A IIO_ICRB0_A *
- * IIO_ICRBC_B IIO_ICRB0_B *
- * IIO_ICRBC_C IIO_ICRB0_C *
- * IIO_ICRBC_D IIO_ICRB0_D *
- * IIO_ICRBC_E IIO_ICRB0_E *
- * IIO_ICRBD_A IIO_ICRB0_A *
- * IIO_ICRBD_B IIO_ICRB0_B *
- * IIO_ICRBD_C IIO_ICRB0_C *
- * IIO_ICRBD_D IIO_ICRB0_D *
- * IIO_ICRBD_E IIO_ICRB0_E *
- * IIO_ICRBE_A IIO_ICRB0_A *
- * IIO_ICRBE_B IIO_ICRB0_B *
- * IIO_ICRBE_C IIO_ICRB0_C *
- * IIO_ICRBE_D IIO_ICRB0_D *
- * IIO_ICRBE_E IIO_ICRB0_E *
- * *
- ************************************************************************/
-
-/*
- * Slightly friendlier names for some common registers.
- */
-#define IIO_WIDGET IIO_WID /* Widget identification */
-#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */
-#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */
-#define IIO_PROTECT IIO_ILAPR /* IO interface protection */
-#define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */
-#define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */
-#define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */
-#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */
-#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */
-#define IIO_LLP_LOG IIO_ILLR /* LLP log */
-#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout */
-#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */
-#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */
-#define IIO_IGFX_0 IIO_IGFX0
-#define IIO_IGFX_1 IIO_IGFX1
-#define IIO_IBCT_0 IIO_IBCT0
-#define IIO_IBCT_1 IIO_IBCT1
-#define IIO_IBLS_0 IIO_IBLS0
-#define IIO_IBLS_1 IIO_IBLS1
-#define IIO_IBSA_0 IIO_IBSA0
-#define IIO_IBSA_1 IIO_IBSA1
-#define IIO_IBDA_0 IIO_IBDA0
-#define IIO_IBDA_1 IIO_IBDA1
-#define IIO_IBNA_0 IIO_IBNA0
-#define IIO_IBNA_1 IIO_IBNA1
-#define IIO_IBIA_0 IIO_IBIA0
-#define IIO_IBIA_1 IIO_IBIA1
-#define IIO_IOPRB_0 IIO_IPRB0
-
-#define IIO_PRTE_A(_x) (IIO_IPRTE0_A + (8 * (_x)))
-#define IIO_PRTE_B(_x) (IIO_IPRTE0_B + (8 * (_x)))
-#define IIO_NUM_PRTES 8 /* Total number of PRTE table entries */
-#define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */
-#define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */
-
-#define IIO_NUM_IPRBS 9
-
-#define IIO_LLP_CSR_IS_UP 0x00002000
-#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
-#define IIO_LLP_CSR_LLP_STAT_SHFT 12
-
-#define IIO_LLP_CB_MAX 0xffff /* in ILLR CB_CNT, Max Check Bit errors */
-#define IIO_LLP_SN_MAX 0xffff /* in ILLR SN_CNT, Max Sequence Number errors */
-
-/* key to IIO_PROTECT_OVRRD */
-#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */
-
-/* BTE register names */
-#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */
-#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */
-#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */
-#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */
-#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */
-#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */
-#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */
-#define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */
-
-/* BTE register offsets from base */
-#define BTEOFF_STAT 0
-#define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0)
-#define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0)
-#define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0)
-#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0)
-#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0)
-
-/* names used in shub diags */
-#define IIO_BASE_BTE0 IIO_IBLS_0
-#define IIO_BASE_BTE1 IIO_IBLS_1
-
-/*
- * Macro which takes the widget number, and returns the
- * IO PRB address of that widget.
- * The value _x is expected to be a widget number in the range
- * 0, 8 - 0xF.
- */
-#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
- (_x) : \
- (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
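
Worked by hand (HUB_WIDGET_ID_MIN is 0x8, defined later in this header),
the macro maps the nine legal widget numbers onto nine consecutive
8-byte IPRB slots:

/*
 * IIO_IOPRB(0x0) == IIO_IOPRB_0              (widget 0)
 * IIO_IOPRB(0x8) == IIO_IOPRB_0 + (1 << 3)   (first widget >= 0x8)
 * IIO_IOPRB(0xF) == IIO_IOPRB_0 + (8 << 3)   (last widget, ninth IPRB)
 */
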
-
-/* GFX Flow Control Node/Widget Register */
-#define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */
-#define IIO_IGFX_W_NUM_MASK ((1<<IIO_IGFX_W_NUM_BITS)-1)
-#define IIO_IGFX_W_NUM_SHIFT 0
-#define IIO_IGFX_PI_NUM_BITS 1 /* size of PI num field */
-#define IIO_IGFX_PI_NUM_MASK ((1<<IIO_IGFX_PI_NUM_BITS)-1)
-#define IIO_IGFX_PI_NUM_SHIFT 4
-#define IIO_IGFX_N_NUM_BITS 8 /* size of node num field */
-#define IIO_IGFX_N_NUM_MASK ((1<<IIO_IGFX_N_NUM_BITS)-1)
-#define IIO_IGFX_N_NUM_SHIFT 5
-#define IIO_IGFX_P_NUM_BITS 1 /* size of processor num field */
-#define IIO_IGFX_P_NUM_MASK ((1<<IIO_IGFX_P_NUM_BITS)-1)
-#define IIO_IGFX_P_NUM_SHIFT 16
-#define IIO_IGFX_INIT(widget, pi, node, cpu) (\
- (((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) | \
- (((pi) & IIO_IGFX_PI_NUM_MASK)<< IIO_IGFX_PI_NUM_SHIFT)| \
- (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \
- (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT))
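
A worked encoding with arbitrarily chosen example values:

/*
 * IIO_IGFX_INIT(0x9, 1, 0x3, 0)
 *   == (0x9 << 0) | (1 << 4) | (0x3 << 5) | (0 << 16) == 0x79
 */
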
-
-/* Scratch registers (all bits available) */
-#define IIO_SCRATCH_REG0 IIO_ISCR0
-#define IIO_SCRATCH_REG1 IIO_ISCR1
-#define IIO_SCRATCH_MASK 0xffffffffffffffffUL
-
-#define IIO_SCRATCH_BIT0_0 0x0000000000000001UL
-#define IIO_SCRATCH_BIT0_1 0x0000000000000002UL
-#define IIO_SCRATCH_BIT0_2 0x0000000000000004UL
-#define IIO_SCRATCH_BIT0_3 0x0000000000000008UL
-#define IIO_SCRATCH_BIT0_4 0x0000000000000010UL
-#define IIO_SCRATCH_BIT0_5 0x0000000000000020UL
-#define IIO_SCRATCH_BIT0_6 0x0000000000000040UL
-#define IIO_SCRATCH_BIT0_7 0x0000000000000080UL
-#define IIO_SCRATCH_BIT0_8 0x0000000000000100UL
-#define IIO_SCRATCH_BIT0_9 0x0000000000000200UL
-#define IIO_SCRATCH_BIT0_A 0x0000000000000400UL
-
-#define IIO_SCRATCH_BIT1_0 0x0000000000000001UL
-#define IIO_SCRATCH_BIT1_1 0x0000000000000002UL
-/* IO Translation Table Entries */
-#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */
- /* Hw manuals number them 1..7! */
-/*
- * IIO_IMEM Register fields.
- */
-#define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */
-#define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */
-#define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */
-
-/*
- * As a permanent workaround for a bug in the PI side of the shub, we've
- * redefined big window 7 as small window 0.
- * XXX does this still apply for SN1??
- */
-#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
-
-/*
- * Use the top big window as a surrogate for the first small window
- */
-#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
-
-#define ILCSR_WARM_RESET 0x100
-
-/*
- * CRB manipulation macros
- * The CRB macros are slightly complicated, since there are up to
- * four registers associated with each CRB entry.
- */
-#define IIO_NUM_CRBS 15 /* Number of CRBs */
-#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
-#define IIO_ICRB_OFFSET 8
-#define IIO_ICRB_0 IIO_ICRB0_A
-#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
-/* XXX - This is now tuneable:
- #define IIO_FIRST_PC_ENTRY 12
- */
-
-#define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x))))
-#define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET))
-#define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET))
-#define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET))
-#define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET))
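
Working the arithmetic through for one entry: entries are
6 * IIO_ICRB_OFFSET = 48 bytes apart, with registers A-E at 8-byte steps
inside an entry:

/*
 * IIO_ICRB_A(2) == IIO_ICRB0_A + 96
 * IIO_ICRB_B(2) == IIO_ICRB0_A + 104
 * ...
 * IIO_ICRB_E(2) == IIO_ICRB0_A + 128
 */
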
-
-#define TNUM_TO_WIDGET_DEV(_tnum) (_tnum & 0x7)
-
-/*
- * values for "ecode" field
- */
-#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
-#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
-#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
- * e.g. WINV to a Read only line. */
-#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
-#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
-#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
-#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
-#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
-
-/*
- * Values for field imsgtype
- */
-#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming message from Xtalk */
-#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
-#define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */
-#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
-
-/*
- * values for field initiator.
- */
-#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */
-#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */
-#define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */
-#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */
-#define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */
-
-/*
- * Number of credits Hub widget has while sending req/response to
- * xbow.
- * A value of 3 is required by Xbow 1.1;
- * we may be able to increase this to 4 with Xbow 1.2.
- */
-#define HUBII_XBOW_CREDIT 3
-#define HUBII_XBOW_REV2_CREDIT 4
-
-/*
- * Number of credits that xtalk devices should use when communicating
- * with a SHub (depth of SHub's queue).
- */
-#define HUB_CREDIT 4
-
-/*
- * Some IIO_PRB fields
- */
-#define IIO_PRB_MULTI_ERR (1LL << 63)
-#define IIO_PRB_SPUR_RD (1LL << 51)
-#define IIO_PRB_SPUR_WR (1LL << 50)
-#define IIO_PRB_RD_TO (1LL << 49)
-#define IIO_PRB_ERROR (1LL << 48)
-
-/*************************************************************************
-
- Some of the IIO field masks and shifts are defined here,
- in order to maintain compatibility between SN0 and SN1 code.
-
-**************************************************************************/
-
-/*
- * ICMR register fields
- * (Note: the IIO_ICMR_P_CNT and IIO_ICMR_PC_VLD from Hub are not
- * present in SHub)
- */
-
-#define IIO_ICMR_CRB_VLD_SHFT 20
-#define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT)
-
-#define IIO_ICMR_FC_CNT_SHFT 16
-#define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT)
-
-#define IIO_ICMR_C_CNT_SHFT 4
-#define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT)
-
-#define IIO_ICMR_PRECISE (1UL << 52)
-#define IIO_ICMR_CLR_RPPD (1UL << 13)
-#define IIO_ICMR_CLR_RQPD (1UL << 12)
-
-/*
- * IIO PIO Deallocation register field masks: (IIO_IPDR)
- * XXX present but not needed in bedrock? See the manual.
- */
-#define IIO_IPDR_PND (1 << 4)
-
-/*
- * IIO CRB deallocation register field masks: (IIO_ICDR)
- */
-#define IIO_ICDR_PND (1 << 4)
-
-/*
- * IO BTE Length/Status (IIO_IBLS) register bit field definitions
- */
-#define IBLS_BUSY (0x1UL << 20)
-#define IBLS_ERROR_SHFT 16
-#define IBLS_ERROR (0x1UL << IBLS_ERROR_SHFT)
-#define IBLS_LENGTH_MASK 0xffff
-
-/*
- * IO BTE Control/Terminate register (IBCT) register bit field definitions
- */
-#define IBCT_POISON (0x1UL << 8)
-#define IBCT_NOTIFY (0x1UL << 4)
-#define IBCT_ZFIL_MODE (0x1UL << 0)
-
-/*
- * IIO Incoming Error Packet Header (IIO_IIEPH1/IIO_IIEPH2)
- */
-#define IIEPH1_VALID (1UL << 44)
-#define IIEPH1_OVERRUN (1UL << 40)
-#define IIEPH1_ERR_TYPE_SHFT 32
-#define IIEPH1_ERR_TYPE_MASK 0xf
-#define IIEPH1_SOURCE_SHFT 20
-#define IIEPH1_SOURCE_MASK 11
-#define IIEPH1_SUPPL_SHFT 8
-#define IIEPH1_SUPPL_MASK 11
-#define IIEPH1_CMD_SHFT 0
-#define IIEPH1_CMD_MASK 7
-
-#define IIEPH2_TAIL (1UL << 40)
-#define IIEPH2_ADDRESS_SHFT 0
-#define IIEPH2_ADDRESS_MASK 38
-
-#define IIEPH1_ERR_SHORT_REQ 2
-#define IIEPH1_ERR_SHORT_REPLY 3
-#define IIEPH1_ERR_LONG_REQ 4
-#define IIEPH1_ERR_LONG_REPLY 5
-
-/*
- * IO Error Clear register bit field definitions
- */
-#define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */
-#define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */
-#define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */
-#define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */
-#define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */
-#define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */
-#define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */
-#define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */
-#define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */
-#define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */
-#define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */
-#define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */
-#define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */
-#define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */
-#define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */
-
-/*
- * IIO CRB control register Fields: IIO_ICCR
- */
-#define IIO_ICCR_PENDING 0x10000
-#define IIO_ICCR_CMD_MASK 0xFF
-#define IIO_ICCR_CMD_SHFT 7
-#define IIO_ICCR_CMD_NOP 0x0 /* No Op */
-#define IIO_ICCR_CMD_WAKE 0x100 /* Reactivate CRB entry and process */
-#define IIO_ICCR_CMD_TIMEOUT 0x200 /* Make CRB timeout & mark invalid */
-#define IIO_ICCR_CMD_EJECT 0x400 /* Contents of entry written to memory
- * via a WB
- */
-#define IIO_ICCR_CMD_FLUSH 0x800
-
-/*
- *
- * CRB Register description.
- *
- * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
- * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
- * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
- * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
- * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
- *
- * Many of the fields in CRB are status bits used by hardware
- * for implementation of the protocol. It's very dangerous to
- * mess around with the CRB registers.
- *
- * It's OK to read the CRB registers and try to make sense out of the
- * fields in CRB.
- *
- * Updating a CRB requires all activity in the Hub IIO to be quiesced;
- * otherwise, a write to a CRB could corrupt other CRB entries.
- * CRBs are here only as a back-door peek at the shub IIO's status.
- * Quiescing implies no DMAs and no PIOs, either directly from the cpu
- * or from sn0net. This is not something that can be done easily, so
- * AVOID updating CRBs.
- */
-
-/*
- * Easy access macros for CRBs, all 5 registers (A-E)
- */
-typedef ii_icrb0_a_u_t icrba_t;
-#define a_sidn ii_icrb0_a_fld_s.ia_sidn
-#define a_tnum ii_icrb0_a_fld_s.ia_tnum
-#define a_addr ii_icrb0_a_fld_s.ia_addr
-#define a_valid ii_icrb0_a_fld_s.ia_vld
-#define a_iow ii_icrb0_a_fld_s.ia_iow
-#define a_regvalue ii_icrb0_a_regval
-
-typedef ii_icrb0_b_u_t icrbb_t;
-#define b_use_old ii_icrb0_b_fld_s.ib_use_old
-#define b_imsgtype ii_icrb0_b_fld_s.ib_imsgtype
-#define b_imsg ii_icrb0_b_fld_s.ib_imsg
-#define b_initiator ii_icrb0_b_fld_s.ib_init
-#define b_exc ii_icrb0_b_fld_s.ib_exc
-#define b_ackcnt ii_icrb0_b_fld_s.ib_ack_cnt
-#define b_resp ii_icrb0_b_fld_s.ib_resp
-#define b_ack ii_icrb0_b_fld_s.ib_ack
-#define b_hold ii_icrb0_b_fld_s.ib_hold
-#define b_wb ii_icrb0_b_fld_s.ib_wb
-#define b_intvn ii_icrb0_b_fld_s.ib_intvn
-#define b_stall_ib ii_icrb0_b_fld_s.ib_stall_ib
-#define b_stall_int ii_icrb0_b_fld_s.ib_stall__intr
-#define b_stall_bte_0 ii_icrb0_b_fld_s.ib_stall__bte_0
-#define b_stall_bte_1 ii_icrb0_b_fld_s.ib_stall__bte_1
-#define b_error ii_icrb0_b_fld_s.ib_error
-#define b_ecode ii_icrb0_b_fld_s.ib_errcode
-#define b_lnetuce ii_icrb0_b_fld_s.ib_ln_uce
-#define b_mark ii_icrb0_b_fld_s.ib_mark
-#define b_xerr ii_icrb0_b_fld_s.ib_xt_err
-#define b_regvalue ii_icrb0_b_regval
-
-typedef ii_icrb0_c_u_t icrbc_t;
-#define c_suppl ii_icrb0_c_fld_s.ic_suppl
-#define c_barrop ii_icrb0_c_fld_s.ic_bo
-#define c_doresp ii_icrb0_c_fld_s.ic_resprqd
-#define c_gbr ii_icrb0_c_fld_s.ic_gbr
-#define c_btenum ii_icrb0_c_fld_s.ic_bte_num
-#define c_cohtrans ii_icrb0_c_fld_s.ic_ct
-#define c_xtsize ii_icrb0_c_fld_s.ic_size
-#define c_source ii_icrb0_c_fld_s.ic_source
-#define c_regvalue ii_icrb0_c_regval
-
-typedef ii_icrb0_d_u_t icrbd_t;
-#define d_sleep ii_icrb0_d_fld_s.id_sleep
-#define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt
-#define d_pripsc ii_icrb0_d_fld_s.id_pr_psc
-#define d_bteop ii_icrb0_d_fld_s.id_bte_op
-#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names */
-#define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names */
-#define d_regvalue ii_icrb0_d_regval
-
-typedef ii_icrb0_e_u_t icrbe_t;
-#define icrbe_ctxtvld ii_icrb0_e_fld_s.ie_cvld
-#define icrbe_toutvld ii_icrb0_e_fld_s.ie_tvld
-#define icrbe_context ii_icrb0_e_fld_s.ie_context
-#define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout
-#define e_regvalue ii_icrb0_e_regval
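
A read-only sketch in the spirit of the warning above; REMOTE_HUB_L and
nasid_t are assumed from the surrounding SN headers, and only reads are
safe:

static inline int crb_entry_valid(nasid_t nasid, int i)
{
	icrba_t crb_a;

	crb_a.a_regvalue = REMOTE_HUB_L(nasid, IIO_ICRB_A(i));
	return crb_a.a_valid;	/* nonzero if CRB entry i is in use */
}
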
-
-/* Number of widgets supported by shub */
-#define HUB_NUM_WIDGET 9
-#define HUB_WIDGET_ID_MIN 0x8
-#define HUB_WIDGET_ID_MAX 0xf
-
-#define HUB_WIDGET_PART_NUM 0xc120
-#define MAX_HUBS_PER_XBOW 2
-
-/* A few more #defines for backwards compatibility */
-#define iprb_t ii_iprb0_u_t
-#define iprb_regval ii_iprb0_regval
-#define iprb_mult_err ii_iprb0_fld_s.i_mult_err
-#define iprb_spur_rd ii_iprb0_fld_s.i_spur_rd
-#define iprb_spur_wr ii_iprb0_fld_s.i_spur_wr
-#define iprb_rd_to ii_iprb0_fld_s.i_rd_to
-#define iprb_ovflow ii_iprb0_fld_s.i_of_cnt
-#define iprb_error ii_iprb0_fld_s.i_error
-#define iprb_ff ii_iprb0_fld_s.i_f
-#define iprb_mode ii_iprb0_fld_s.i_m
-#define iprb_bnakctr ii_iprb0_fld_s.i_nb
-#define iprb_anakctr ii_iprb0_fld_s.i_na
-#define iprb_xtalkctr ii_iprb0_fld_s.i_c
-
-#define LNK_STAT_WORKING 0x2 /* LLP is working */
-
-#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
-#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
-#define IIO_WSTAT_TXRETRY_MASK 0x7F /* should be 0xFF?? */
-#define IIO_WSTAT_TXRETRY_SHFT 16
-#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
- IIO_WSTAT_TXRETRY_MASK)
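
A small illustrative decode of a widget status value; the helper name is
a placeholder, not SN API:

static inline int hub_wstat_healthy(u64 wstat)
{
	if (wstat & IIO_WSTAT_ECRAZY)	/* hub gone crazy */
		return 0;
	return IIO_WSTAT_TXRETRY_CNT(wstat) == 0;	/* no Tx retries */
}
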
-
-/* Number of II perf. counters we can multiplex at once */
-
-#define IO_PERF_SETS 32
-
-/* Bit for the widget in inbound access register */
-#define IIO_IIWA_WIDGET(_w) ((u64)(1ULL << _w))
-/* Bit for the widget in outbound access register */
-#define IIO_IOWA_WIDGET(_w) ((u64)(1ULL << _w))
-
-/* NOTE: The following define assumes that we are going to get
- * widget numbers from 8 thru F and the device numbers within
- * widget from 0 thru 7.
- */
-#define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((u64)(1ULL << (8 * ((w) - 8) + (d))))
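
Worked example: widget 0x9, device 3 selects bit 8*(0x9-8)+3 = 11, i.e.
IIO_IIDEM_WIDGETDEV_MASK(0x9, 3) == 1ULL << 11.
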
-
-/* IO Interrupt Destination Register */
-#define IIO_IIDSR_SENT_SHIFT 28
-#define IIO_IIDSR_SENT_MASK 0x30000000
-#define IIO_IIDSR_ENB_SHIFT 24
-#define IIO_IIDSR_ENB_MASK 0x01000000
-#define IIO_IIDSR_NODE_SHIFT 9
-#define IIO_IIDSR_NODE_MASK 0x000ff700
-#define IIO_IIDSR_PI_ID_SHIFT 8
-#define IIO_IIDSR_PI_ID_MASK 0x00000100
-#define IIO_IIDSR_LVL_SHIFT 0
-#define IIO_IIDSR_LVL_MASK 0x000000ff
-
-/* Xtalk timeout threshold register (IIO_IXTT) */
-#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */
-#define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT)
-#define IXTT_RRSP_PS_SHFT 32 /* read response TO prescaler */
-#define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT)
-#define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */
-#define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT)
-
-/*
- * The IO LLP control status register and widget control register
- */
-
-typedef union hubii_wcr_u {
- u64 wcr_reg_value;
- struct {
- u64 wcr_widget_id:4, /* Widget identification */
- wcr_tag_mode:1, /* Tag mode */
- wcr_rsvd1:8, /* Reserved */
- wcr_xbar_crd:3, /* LLP crossbar credit */
- wcr_f_bad_pkt:1, /* Force bad llp pkt enable */
- wcr_dir_con:1, /* widget direct connect */
- wcr_e_thresh:5, /* elasticity threshold */
- wcr_rsvd:41; /* unused */
- } wcr_fields_s;
-} hubii_wcr_t;
-
-#define iwcr_dir_con wcr_fields_s.wcr_dir_con
-
-/* The structures below are defined to extract and modify the II
- * performance registers. */
-
-/* io_perf_sel allows the caller to specify what tests will be
- performed */
-
-typedef union io_perf_sel {
- u64 perf_sel_reg;
- struct {
- u64 perf_ippr0:4, perf_ippr1:4, perf_icct:8, perf_rsvd:48;
- } perf_sel_bits;
-} io_perf_sel_t;
-
-/* io_perf_cnt is used to extract the count from the shub registers. Due to
- * hardware problems there is only one counter, not two. */
-
-typedef union io_perf_cnt {
- u64 perf_cnt;
- struct {
- u64 perf_cnt:20, perf_rsvd2:12, perf_rsvd1:32;
- } perf_cnt_bits;
-
-} io_perf_cnt_t;
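
A hedged sketch of programming the selection and reading back the single
usable counter; the two MMIO pointers are assumptions, not part of this
header:

static inline u64 ii_perf_read(volatile u64 *ipcr_mmr,
			       volatile u64 *ippr_mmr, int event)
{
	io_perf_sel_t sel;
	io_perf_cnt_t cnt;

	sel.perf_sel_reg = 0;
	sel.perf_sel_bits.perf_ippr0 = event;	/* menu selection, 0..0xF */
	*ipcr_mmr = sel.perf_sel_reg;

	cnt.perf_cnt = *ippr_mmr;
	return cnt.perf_cnt_bits.perf_cnt;	/* 20-bit count */
}
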
-
-typedef union iprte_a {
- u64 entry;
- struct {
- u64 i_rsvd_1:3;
- u64 i_addr:38;
- u64 i_init:3;
- u64 i_source:8;
- u64 i_rsvd:2;
- u64 i_widget:4;
- u64 i_to_cnt:5;
- u64 i_vld:1;
- } iprte_fields;
-} iprte_a_t;
-
-#endif /* _ASM_IA64_SN_SHUBIO_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/simulator.h b/xen/include/asm-ia64/linux/asm/sn/simulator.h
deleted file mode 100644
index c3fd3eb257..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/simulator.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_SIMULATOR_H
-#define _ASM_IA64_SN_SIMULATOR_H
-
-
-#define SNMAGIC 0xaeeeeeee8badbeefL
-#define IS_MEDUSA() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})
-
-#define SIMULATOR_SLEEP() asm("nop.i 0x8beef")
-#define IS_RUNNING_ON_SIMULATOR() (sn_prom_type)
-#define IS_RUNNING_ON_FAKE_PROM() (sn_prom_type == 2)
-extern int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
-
-#endif /* _ASM_IA64_SN_SIMULATOR_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h b/xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h
deleted file mode 100644
index a676dd9ace..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-#ifndef _ASM_IA64_SN_SN_CPUID_H
-#define _ASM_IA64_SN_SN_CPUID_H
-
-#include <linux/smp.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/pda.h>
-#include <asm/intrinsics.h>
-
-
-/*
- * Functions for converting between cpuids, nodeids and NASIDs.
- *
- * These are for SGI platforms only.
- *
- */
-
-
-
-
-/*
- * Definitions of terms (these definitions are for IA64 ONLY. Other architectures
- * use cpuid/cpunum quite differently):
- *
- * CPUID - a number in range of 0..NR_CPUS-1 that uniquely identifies
- * the cpu. The value cpuid has no significance on IA64 other than
- * the boot cpu is 0.
- * smp_processor_id() returns the cpuid of the current cpu.
- *
- * CPU_PHYSICAL_ID (also known as HARD_PROCESSOR_ID)
- * This is the same as 31:24 of the processor LID register
- * hard_smp_processor_id()- cpu_physical_id of current processor
- * cpu_physical_id(cpuid) - convert a <cpuid> to a <physical_cpuid>
- * cpu_logical_id(phy_id) - convert a <physical_cpuid> to a <cpuid>
- * * not really efficient - don't use in perf critical code
- *
- * SLICE - a number in the range of 0 - 3 (typically) that represents the
- * cpu number on a brick.
- *
- * SUBNODE - (almost obsolete) the number of the FSB that a cpu is
- * connected to. This is also the same as the PI number. Usually 0 or 1.
- *
- * NOTE!!!: the value of the bits in the cpu physical id (SAPICid or LID) of a cpu has no
- * significance. The SAPIC id (LID) is a 16-bit cookie that has meaning only to the PROM.
- *
- *
- * The macros convert between cpu physical ids & slice/nasid/cnodeid.
- * These terms are described below:
- *
- *
- * Brick
- * ----- ----- ----- ----- CPU
- * | 0 | | 1 | | 0 | | 1 | SLICE
- * ----- ----- ----- -----
- * | | | |
- * | | | |
- * 0 | | 2 0 | | 2 FSB SLOT
- * ------- -------
- * | |
- * | |
- * | |
- * ------------ -------------
- * | | | |
- * | SHUB | | SHUB | NASID (0..MAX_NASIDS)
- * | |----- | | CNODEID (0..num_compact_nodes-1)
- * | | | |
- * | | | |
- * ------------ -------------
- * | |
- *
- *
- */
-
-#define get_node_number(addr) NASID_GET(addr)
-
-/*
- * NOTE: on non-MP systems, only cpuid 0 exists
- */
-
-extern short physical_node_map[]; /* indexed by nasid to get cnode */
-
-/*
- * Macros for retrieving info about current cpu
- */
-#define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid)
-#define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode)
-#define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice)
-#define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode)
-#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
-
-/*
- * Macros for retrieving info about an arbitrary cpu
- * cpuid - logical cpu id
- */
-#define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid)
-#define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode)
-#define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice)
-
-
-/*
- * Don't use the following in performance critical code. They require scans
- * of potentially large tables.
- */
-extern int nasid_slice_to_cpuid(int, int);
-
-/*
- * cnodeid_to_nasid - convert a cnodeid to a NASID
- */
-#define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid])
-
-/*
- * nasid_to_cnodeid - convert a NASID to a cnodeid
- */
-#define nasid_to_cnodeid(nasid) (physical_node_map[nasid])
-
-/*
- * partition_coherence_id - get the coherence ID of the current partition
- */
-extern u8 sn_coherency_id;
-#define partition_coherence_id() (sn_coherency_id)
-
-#endif /* _ASM_IA64_SN_SN_CPUID_H */
-
diff --git a/xen/include/asm-ia64/linux/asm/sn/sn_feature_sets.h b/xen/include/asm-ia64/linux/asm/sn/sn_feature_sets.h
deleted file mode 100644
index 30dcfa442e..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/sn_feature_sets.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _ASM_IA64_SN_FEATURE_SETS_H
-#define _ASM_IA64_SN_FEATURE_SETS_H
-
-/*
- * SN PROM Features
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2005-2006 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-/* --------------------- PROM Features -----------------------------*/
-extern int sn_prom_feature_available(int id);
-
-#define MAX_PROM_FEATURE_SETS 2
-
-/*
- * The following defines features that may or may not be supported by the
- * current PROM. The OS uses sn_prom_feature_available(feature) to test for
- * the presence of a PROM feature. Down rev (old) PROMs will always test
- * "false" for new features.
- *
- * Use:
- * if (sn_prom_feature_available(PRF_XXX))
- * ...
- */
-
-#define PRF_PAL_CACHE_FLUSH_SAFE 0
-#define PRF_DEVICE_FLUSH_LIST 1
-#define PRF_HOTPLUG_SUPPORT 2
-
-/* --------------------- OS Features -------------------------------*/
-
-/*
- * The following defines OS features that are optionally present in
- * the operating system.
- * During boot, PROM is notified of these features via a series of calls:
- *
- * ia64_sn_set_os_feature(feature1);
- *
- * Once enabled, a feature cannot be disabled.
- *
- * By default, features are disabled unless explicitly enabled.
- */
-#define OSF_MCA_SLV_TO_OS_INIT_SLV 0
-#define OSF_FEAT_LOG_SBES 1
-
-#endif /* _ASM_IA64_SN_FEATURE_SETS_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/sn_sal.h b/xen/include/asm-ia64/linux/asm/sn/sn_sal.h
deleted file mode 100644
index 2c4004eb5a..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/sn_sal.h
+++ /dev/null
@@ -1,1167 +0,0 @@
-#ifndef _ASM_IA64_SN_SN_SAL_H
-#define _ASM_IA64_SN_SN_SAL_H
-
-/*
- * System Abstraction Layer definitions for IA64
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2000-2006 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-#include <asm/sal.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/geo.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/shub_mmr.h>
-
-// SGI Specific Calls
-#define SN_SAL_POD_MODE 0x02000001
-#define SN_SAL_SYSTEM_RESET 0x02000002
-#define SN_SAL_PROBE 0x02000003
-#define SN_SAL_GET_MASTER_NASID 0x02000004
-#define SN_SAL_GET_KLCONFIG_ADDR 0x02000005
-#define SN_SAL_LOG_CE 0x02000006
-#define SN_SAL_REGISTER_CE 0x02000007
-#define SN_SAL_GET_PARTITION_ADDR 0x02000009
-#define SN_SAL_XP_ADDR_REGION 0x0200000f
-#define SN_SAL_NO_FAULT_ZONE_VIRTUAL 0x02000010
-#define SN_SAL_NO_FAULT_ZONE_PHYSICAL 0x02000011
-#define SN_SAL_PRINT_ERROR 0x02000012
-#define SN_SAL_SET_ERROR_HANDLING_FEATURES 0x0200001a // reentrant
-#define SN_SAL_GET_FIT_COMPT 0x0200001b // reentrant
-#define SN_SAL_GET_SAPIC_INFO 0x0200001d
-#define SN_SAL_GET_SN_INFO 0x0200001e
-#define SN_SAL_CONSOLE_PUTC 0x02000021
-#define SN_SAL_CONSOLE_GETC 0x02000022
-#define SN_SAL_CONSOLE_PUTS 0x02000023
-#define SN_SAL_CONSOLE_GETS 0x02000024
-#define SN_SAL_CONSOLE_GETS_TIMEOUT 0x02000025
-#define SN_SAL_CONSOLE_POLL 0x02000026
-#define SN_SAL_CONSOLE_INTR 0x02000027
-#define SN_SAL_CONSOLE_PUTB 0x02000028
-#define SN_SAL_CONSOLE_XMIT_CHARS 0x0200002a
-#define SN_SAL_CONSOLE_READC 0x0200002b
-#define SN_SAL_SYSCTL_OP 0x02000030
-#define SN_SAL_SYSCTL_MODID_GET 0x02000031
-#define SN_SAL_SYSCTL_GET 0x02000032
-#define SN_SAL_SYSCTL_IOBRICK_MODULE_GET 0x02000033
-#define SN_SAL_SYSCTL_IO_PORTSPEED_GET 0x02000035
-#define SN_SAL_SYSCTL_SLAB_GET 0x02000036
-#define SN_SAL_BUS_CONFIG 0x02000037
-#define SN_SAL_SYS_SERIAL_GET 0x02000038
-#define SN_SAL_PARTITION_SERIAL_GET 0x02000039
-#define SN_SAL_SYSCTL_PARTITION_GET 0x0200003a
-#define SN_SAL_SYSTEM_POWER_DOWN 0x0200003b
-#define SN_SAL_GET_MASTER_BASEIO_NASID 0x0200003c
-#define SN_SAL_COHERENCE 0x0200003d
-#define SN_SAL_MEMPROTECT 0x0200003e
-#define SN_SAL_SYSCTL_FRU_CAPTURE 0x0200003f
-
-#define SN_SAL_SYSCTL_IOBRICK_PCI_OP 0x02000042 // reentrant
-#define SN_SAL_IROUTER_OP 0x02000043
-#define SN_SAL_SYSCTL_EVENT 0x02000044
-#define SN_SAL_IOIF_INTERRUPT 0x0200004a
-#define SN_SAL_HWPERF_OP 0x02000050 // lock
-#define SN_SAL_IOIF_ERROR_INTERRUPT 0x02000051
-#define SN_SAL_IOIF_PCI_SAFE 0x02000052
-#define SN_SAL_IOIF_SLOT_ENABLE 0x02000053
-#define SN_SAL_IOIF_SLOT_DISABLE 0x02000054
-#define SN_SAL_IOIF_GET_HUBDEV_INFO 0x02000055
-#define SN_SAL_IOIF_GET_PCIBUS_INFO 0x02000056
-#define SN_SAL_IOIF_GET_PCIDEV_INFO 0x02000057
-#define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058 // deprecated
-#define SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST 0x0200005a
-
-#define SN_SAL_IOIF_INIT 0x0200005f
-#define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060
-#define SN_SAL_BTE_RECOVER 0x02000061
-#define SN_SAL_RESERVED_DO_NOT_USE 0x02000062
-#define SN_SAL_IOIF_GET_PCI_TOPOLOGY 0x02000064
-
-#define SN_SAL_GET_PROM_FEATURE_SET 0x02000065
-#define SN_SAL_SET_OS_FEATURE_SET 0x02000066
-#define SN_SAL_INJECT_ERROR 0x02000067
-#define SN_SAL_SET_CPU_NUMBER 0x02000068
-
-#define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069
-
-/*
- * Service-specific constants
- */
-
-/* Console interrupt manipulation */
- /* action codes */
-#define SAL_CONSOLE_INTR_OFF 0 /* turn the interrupt off */
-#define SAL_CONSOLE_INTR_ON 1 /* turn the interrupt on */
-#define SAL_CONSOLE_INTR_STATUS 2 /* retrieve the interrupt status */
- /* interrupt specification & status return codes */
-#define SAL_CONSOLE_INTR_XMIT 1 /* output interrupt */
-#define SAL_CONSOLE_INTR_RECV 2 /* input interrupt */
-
-/* interrupt handling */
-#define SAL_INTR_ALLOC 1
-#define SAL_INTR_FREE 2
-
-/*
- * operations available on the generic SN_SAL_SYSCTL_OP
- * runtime service
- */
-#define SAL_SYSCTL_OP_IOBOARD 0x0001 /* retrieve board type */
-#define SAL_SYSCTL_OP_TIO_JLCK_RST 0x0002 /* issue TIO clock reset */
-
-/*
- * IRouter (i.e. generalized system controller) operations
- */
-#define SAL_IROUTER_OPEN 0 /* open a subchannel */
-#define SAL_IROUTER_CLOSE 1 /* close a subchannel */
-#define SAL_IROUTER_SEND 2 /* send part of an IRouter packet */
-#define SAL_IROUTER_RECV 3 /* receive part of an IRouter packet */
-#define SAL_IROUTER_INTR_STATUS 4 /* check the interrupt status for
- * an open subchannel
- */
-#define SAL_IROUTER_INTR_ON 5 /* enable an interrupt */
-#define SAL_IROUTER_INTR_OFF 6 /* disable an interrupt */
-#define SAL_IROUTER_INIT 7 /* initialize IRouter driver */
-
-/* IRouter interrupt mask bits */
-#define SAL_IROUTER_INTR_XMIT SAL_CONSOLE_INTR_XMIT
-#define SAL_IROUTER_INTR_RECV SAL_CONSOLE_INTR_RECV
-
-/*
- * Error Handling Features
- */
-#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 // obsolete
-#define SAL_ERR_FEAT_LOG_SBES 0x2 // obsolete
-#define SAL_ERR_FEAT_MFR_OVERRIDE 0x4
-#define SAL_ERR_FEAT_SBE_THRESHOLD 0xffff0000
-
-/*
- * SAL Error Codes
- */
-#define SALRET_MORE_PASSES 1
-#define SALRET_OK 0
-#define SALRET_NOT_IMPLEMENTED (-1)
-#define SALRET_INVALID_ARG (-2)
-#define SALRET_ERROR (-3)
-
-#define SN_SAL_FAKE_PROM 0x02009999
-
-/**
- * sn_sal_rev - get the SGI SAL revision number
- *
- * The SGI PROM stores its version in the sal_[ab]_rev_(major|minor).
- * This routine simply extracts the major and minor values and
- * presents them in a u32 format.
- *
- * For example, version 4.05 would be represented as 0x0405.
- */
-static inline u32
-sn_sal_rev(void)
-{
- struct ia64_sal_systab *systab = __va(efi.sal_systab);
-
- return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor);
-}
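
A small illustrative helper using the encoding described above
(0x0405 == SAL revision 4.05):

static inline int sn_sal_rev_at_least(u32 version)
{
	return sn_sal_rev() >= version;
}
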
-
-/*
- * Returns the master console nasid; if the call fails, an illegal value
- * is returned.
- */
-static inline u64
-ia64_sn_get_console_nasid(void)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
-
- if (ret_stuff.status < 0)
- return ret_stuff.status;
-
- /* Master console nasid is in 'v0' */
- return ret_stuff.v0;
-}
-
-/*
- * Returns the master baseio nasid; if the call fails, an illegal value
- * is returned.
- */
-static inline u64
-ia64_sn_get_master_baseio_nasid(void)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0);
-
- if (ret_stuff.status < 0)
- return ret_stuff.status;
-
- /* Master baseio nasid is in 'v0' */
- return ret_stuff.v0;
-}
-
-static inline void *
-ia64_sn_get_klconfig_addr(nasid_t nasid)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
- return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
-}
-
-/*
- * Returns the next console character.
- */
-static inline u64
-ia64_sn_console_getc(int *ch)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
-
- /* character is in 'v0' */
- *ch = (int)ret_stuff.v0;
-
- return ret_stuff.status;
-}
-
-/*
- * Read a character from the SAL console device, after a previous interrupt
- * or poll operation has indicated that a character is available to be read.
- */
-static inline u64
-ia64_sn_console_readc(void)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0);
-
- /* character is in 'v0' */
- return ret_stuff.v0;
-}
-
-/*
- * Sends the given character to the console.
- */
-static inline u64
-ia64_sn_console_putc(char ch)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (u64)ch, 0, 0, 0, 0, 0, 0);
-
- return ret_stuff.status;
-}
-
-/*
- * Sends the given buffer to the console.
- */
-static inline u64
-ia64_sn_console_putb(const char *buf, int len)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (u64)buf, (u64)len, 0, 0, 0, 0, 0);
-
- if ( ret_stuff.status == 0 ) {
- return ret_stuff.v0;
- }
- return (u64)0;
-}
-
-/*
- * Print a platform error record
- */
-static inline u64
-ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (u64)hook, (u64)rec, 0, 0, 0, 0, 0);
-
- return ret_stuff.status;
-}
-
-/*
- * Check for Platform errors
- */
-static inline u64
-ia64_sn_plat_cpei_handler(void)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
-
- return ret_stuff.status;
-}
-
-/*
- * Set Error Handling Features (Obsolete)
- */
-static inline u64
-ia64_sn_plat_set_error_handling_features(void)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_REENTRANT(ret_stuff, SN_SAL_SET_ERROR_HANDLING_FEATURES,
- SAL_ERR_FEAT_LOG_SBES,
- 0, 0, 0, 0, 0, 0);
-
- return ret_stuff.status;
-}
-
-/*
- * Checks for console input.
- */
-static inline u64
-ia64_sn_console_check(int *result)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
-
- /* result is in 'v0' */
- *result = (int)ret_stuff.v0;
-
- return ret_stuff.status;
-}
-
-/*
- * Checks console interrupt status
- */
-static inline u64
-ia64_sn_console_intr_status(void)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
- 0, SAL_CONSOLE_INTR_STATUS,
- 0, 0, 0, 0, 0);
-
- if (ret_stuff.status == 0) {
- return ret_stuff.v0;
- }
-
- return 0;
-}
-
-/*
- * Enable an interrupt on the SAL console device.
- */
-static inline void
-ia64_sn_console_intr_enable(u64 intr)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
- intr, SAL_CONSOLE_INTR_ON,
- 0, 0, 0, 0, 0);
-}
-
-/*
- * Disable an interrupt on the SAL console device.
- */
-static inline void
-ia64_sn_console_intr_disable(u64 intr)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
- intr, SAL_CONSOLE_INTR_OFF,
- 0, 0, 0, 0, 0);
-}
-
-/*
- * Sends a character buffer to the console asynchronously.
- */
-static inline u64
-ia64_sn_console_xmit_chars(char *buf, int len)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
- (u64)buf, (u64)len,
- 0, 0, 0, 0, 0);
-
- if (ret_stuff.status == 0) {
- return ret_stuff.v0;
- }
-
- return 0;
-}
-
-/*
- * Returns the iobrick module Id
- */
-static inline u64
-ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
-
- /* result is in 'v0' */
- *result = (int)ret_stuff.v0;
-
- return ret_stuff.status;
-}
-
-/**
- * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function
- *
- * SN_SAL_POD_MODE actually takes an argument, but it's always
- * 0 when we call it from the kernel, so we don't have to expose
- * it to the caller.
- */
-static inline u64
-ia64_sn_pod_mode(void)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL_REENTRANT(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
- if (isrv.status)
- return 0;
- return isrv.v0;
-}
-
-/**
- * ia64_sn_probe_mem - read from memory safely
- * @addr: address to probe
- * @size: number of bytes to read (1,2,4,8)
- * @data_ptr: address to store value read by probe (-1 returned if probe fails)
- *
- * Call into the SAL to do a memory read. If the read generates a machine
- * check, this routine will recover gracefully and return -1 to the caller.
- * @addr is usually a kernel virtual address in uncached space (i.e. the
- * address starts with 0xc), but if called in physical mode, @addr should
- * be a physical address.
- *
- * Return values:
- * 0 - probe successful
- * 1 - probe failed (generated MCA)
- * 2 - Bad arg
- * <0 - PAL error
- */
-static inline u64
-ia64_sn_probe_mem(long addr, long size, void *data_ptr)
-{
- struct ia64_sal_retval isrv;
-
- SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0);
-
- if (data_ptr) {
- switch (size) {
- case 1:
- *((u8*)data_ptr) = (u8)isrv.v0;
- break;
- case 2:
- *((u16*)data_ptr) = (u16)isrv.v0;
- break;
- case 4:
- *((u32*)data_ptr) = (u32)isrv.v0;
- break;
- case 8:
- *((u64*)data_ptr) = (u64)isrv.v0;
- break;
- default:
- isrv.status = 2;
- }
- }
- return isrv.status;
-}
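
An illustrative wrapper; the name and return convention are invented for
the example:

static inline int probe_u32_safe(long addr, u32 *out)
{
	/* nonzero iff the probe neither MCA'd nor had bad args */
	return ia64_sn_probe_mem(addr, sizeof(*out), out) == 0;
}
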
-
-/*
- * Retrieve the system serial number as an ASCII string.
- */
-static inline u64
-ia64_sn_sys_serial_get(char *buf)
-{
- struct ia64_sal_retval ret_stuff;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
- return ret_stuff.status;
-}
-
-extern char sn_system_serial_number_string[];
-extern u64 sn_partition_serial_number;
-
-static inline char *
-sn_system_serial_number(void) {
-	if (!sn_system_serial_number_string[0])
-		ia64_sn_sys_serial_get(sn_system_serial_number_string);
-	return sn_system_serial_number_string;
-}
-
-
-/*
- * Returns a unique id number for this system and partition (suitable for
- * use with license managers), based in part on the system serial number.
- */
-static inline u64
-ia64_sn_partition_serial_get(void)
-{
- struct ia64_sal_retval ret_stuff;
- ia64_sal_oemcall_reentrant(&ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0,
- 0, 0, 0, 0, 0, 0);
- if (ret_stuff.status != 0)
- return 0;
- return ret_stuff.v0;
-}
-
-static inline u64
-sn_partition_serial_number_val(void) {
- if (unlikely(sn_partition_serial_number == 0)) {
- sn_partition_serial_number = ia64_sn_partition_serial_get();
- }
- return sn_partition_serial_number;
-}
-
-/*
- * Returns the partition id of the nasid passed in as an argument,
- * or INVALID_PARTID if the partition id cannot be retrieved.
- */
-static inline partid_t
-ia64_sn_sysctl_partition_get(nasid_t nasid)
-{
- struct ia64_sal_retval ret_stuff;
- SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
- 0, 0, 0, 0, 0, 0);
- if (ret_stuff.status != 0)
- return -1;
- return ((partid_t)ret_stuff.v0);
-}
-
-/*
- * Returns the physical address of the partition's reserved page through
- * an iterative number of calls.
- *
- * On first call, 'cookie' and 'len' should be set to 0, and 'addr'
- * set to the nasid of the partition whose reserved page's address is
- * being sought.
- * On subsequent calls, pass the values that were returned by the
- * previous call.
- *
- * While the return status equals SALRET_MORE_PASSES, keep calling
- * this function after first copying 'len' bytes starting at 'addr'
- * into 'buf'. Once the return status equals SALRET_OK, 'addr' will
- * be the physical address of the partition's reserved page. If the
- * return status equals neither of these, an error has occurred.
- */
-static inline s64
-sn_partition_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
-{
- struct ia64_sal_retval rv;
- ia64_sal_oemcall_reentrant(&rv, SN_SAL_GET_PARTITION_ADDR, *cookie,
- *addr, buf, *len, 0, 0, 0);
- *cookie = rv.v0;
- *addr = rv.v1;
- *len = rv.v2;
- return rv.status;
-}
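
A sketch of the iterative protocol described above; the function name is
hypothetical and the per-pass copy into 'buf' is left as a comment:

static inline s64 fetch_reserved_page_pa(u64 buf, nasid_t nasid, u64 *pa)
{
	u64 cookie = 0, len = 0, addr = (u64)nasid;
	s64 status;

	while ((status = sn_partition_reserved_page_pa(buf, &cookie,
			&addr, &len)) == SALRET_MORE_PASSES) {
		/* copy 'len' bytes from 'addr' into 'buf' before next pass */
	}
	if (status == SALRET_OK)
		*pa = addr;	/* physical address of the reserved page */
	return status;
}
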
-
-/*
- * Register or unregister a physical address range being referenced across
- * a partition boundary for which certain SAL errors should be scanned for,
- * cleaned up and ignored. This is of value for kernel partitioning code only.
- * Values for the operation argument:
- * 1 = register this address range with SAL
- * 0 = unregister this address range with SAL
- *
- * SAL maintains a reference count on an address range in case it is registered
- * multiple times.
- *
- * On success, returns the reference count of the address range after the SAL
- * call has performed the current registration/unregistration. Returns a
- * negative value if an error occurred.
- */
-static inline int
-sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
-{
- struct ia64_sal_retval ret_stuff;
- ia64_sal_oemcall(&ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len,
- (u64)operation, 0, 0, 0, 0);
- return ret_stuff.status;
-}
-
-/*
- * Register or unregister an instruction range for which SAL errors should
- * be ignored. If an error occurs while in the registered range, SAL jumps
- * to return_addr after ignoring the error. Values for the operation argument:
- * 1 = register this instruction range with SAL
- * 0 = unregister this instruction range with SAL
- *
- * Returns 0 on success, or a negative value if an error occurred.
- */
-static inline int
-sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
- int virtual, int operation)
-{
- struct ia64_sal_retval ret_stuff;
- u64 call;
- if (virtual) {
- call = SN_SAL_NO_FAULT_ZONE_VIRTUAL;
- } else {
- call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
- }
- ia64_sal_oemcall(&ret_stuff, call, start_addr, end_addr, return_addr,
- (u64)1, 0, 0, 0);
- return ret_stuff.status;
-}
-
-/*
- * Change or query the coherence domain for this partition. Each cpu-based
- * nasid is represented by a bit in an array of 64-bit words:
- * 0 = not in this partition's coherency domain
- * 1 = in this partition's coherency domain
- *
- * It is not possible for the local system's nasids to be removed from
- * the coherency domain. Purpose of the domain arguments:
- * new_domain = set the coherence domain to the given nasids
- * old_domain = return the current coherence domain
- *
- * Returns 0 on success, or a negative value if an error occurred.
- */
-static inline int
-sn_change_coherence(u64 *new_domain, u64 *old_domain)
-{
- struct ia64_sal_retval ret_stuff;
- ia64_sal_oemcall(&ret_stuff, SN_SAL_COHERENCE, (u64)new_domain,
- (u64)old_domain, 0, 0, 0, 0, 0);
- return ret_stuff.status;
-}
-
-/*
- * Change memory access protections for a physical address range.
- * nasid_array is not used on Altix, but may be in future architectures.
- * Available memory protection access classes are defined after the function.
- */
-static inline int
-sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
-{
- struct ia64_sal_retval ret_stuff;
-
- ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len,
- (u64)nasid_array, perms, 0, 0, 0);
- return ret_stuff.status;
-}
-#define SN_MEMPROT_ACCESS_CLASS_0 0x14a080
-#define SN_MEMPROT_ACCESS_CLASS_1 0x2520c2
-#define SN_MEMPROT_ACCESS_CLASS_2 0x14a1ca
-#define SN_MEMPROT_ACCESS_CLASS_3 0x14a290
-#define SN_MEMPROT_ACCESS_CLASS_6 0x084080
-#define SN_MEMPROT_ACCESS_CLASS_7 0x021080
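
A hedged usage sketch pairing the function with one of the classes above; 'paddr' stands for a caller-chosen physical address, and PAGE_SIZE is assumed from the usual kernel headers:

    /* Sketch: request access class 1 on one page at 'paddr'. */
    static int memprotect_sketch(u64 paddr)
    {
            u64 nasid_array;        /* unused on Altix, per the comment above */

            return sn_change_memprotect(paddr, PAGE_SIZE,
                                        SN_MEMPROT_ACCESS_CLASS_1,
                                        &nasid_array);  /* negative on error */
    }
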
-
-/*
- * Turns off system power.
- */
-static inline void
-ia64_sn_power_down(void)
-{
- struct ia64_sal_retval ret_stuff;
- SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
- while(1)
- cpu_relax();
- /* never returns */
-}
-
-/**
- * ia64_sn_fru_capture - tell the system controller to capture hw state
- *
- * This routine will call the SAL which will tell the system controller(s)
- * to capture hw mmr information from each SHub in the system.
- */
-static inline u64
-ia64_sn_fru_capture(void)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
- if (isrv.status)
- return 0;
- return isrv.v0;
-}
-
-/*
- * Performs an operation on a PCI bus or slot -- power up, power down
- * or reset.
- */
-static inline u64
-ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type,
- u64 bus, char slot,
- u64 action)
-{
- struct ia64_sal_retval rv = {0, 0, 0, 0};
-
- SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action,
- bus, (u64) slot, 0, 0);
- if (rv.status)
- return rv.v0;
- return 0;
-}
-
-
-/*
- * Open a subchannel for sending arbitrary data to the system
- * controller network via the system controller device associated with
- * 'nasid'. Return the subchannel number or a negative error code.
- */
-static inline int
-ia64_sn_irtr_open(nasid_t nasid)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid,
- 0, 0, 0, 0, 0);
- return (int) rv.v0;
-}
-
-/*
- * Close system controller subchannel 'subch' previously opened on 'nasid'.
- */
-static inline int
-ia64_sn_irtr_close(nasid_t nasid, int subch)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE,
- (u64) nasid, (u64) subch, 0, 0, 0, 0);
- return (int) rv.status;
-}
-
-/*
- * Read data from system controller associated with 'nasid' on
- * subchannel 'subch'. The buffer to be filled is pointed to by
- * 'buf', and its capacity is in the integer pointed to by 'len'. The
- * referent of 'len' is set to the number of bytes read by the SAL
- * call. The return value is either SALRET_OK (for bytes read) or
- * SALRET_ERROR (for error or "no data available").
- */
-static inline int
-ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV,
- (u64) nasid, (u64) subch, (u64) buf, (u64) len,
- 0, 0);
- return (int) rv.status;
-}
-
-/*
- * Write data to the system controller network via the system
- * controller associated with 'nasid' on subchannel 'subch'. The
- * buffer to be written out is pointed to by 'buf', and 'len' is the
- * number of bytes to be written. The return value is either the
- * number of bytes written (which could be zero) or a negative error
- * code.
- */
-static inline int
-ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND,
- (u64) nasid, (u64) subch, (u64) buf, (u64) len,
- 0, 0);
- return (int) rv.v0;
-}
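
Since a send may accept fewer bytes than requested, a caller typically loops until the buffer drains. A sketch (editorial, assuming an already-open subchannel; a real caller would sleep or wait for SAL_IROUTER_INTR_XMIT rather than spin when zero bytes are accepted):

    static int irtr_send_all_sketch(nasid_t nasid, int subch, char *buf, int len)
    {
            int sent;

            while (len > 0) {
                    sent = ia64_sn_irtr_send(nasid, subch, buf, len);
                    if (sent < 0)
                            return sent;    /* negative error code from SAL */
                    buf += sent;            /* advance past accepted bytes */
                    len -= sent;
            }
            return 0;
    }
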
-
-/*
- * Check whether any interrupts are pending for the system controller
- * associated with 'nasid' and its subchannel 'subch'. The return
- * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or
- * SAL_IROUTER_INTR_RECV).
- */
-static inline int
-ia64_sn_irtr_intr(nasid_t nasid, int subch)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS,
- (u64) nasid, (u64) subch, 0, 0, 0, 0);
- return (int) rv.v0;
-}
-
-/*
- * Enable the interrupt indicated by the intr parameter (either
- * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
- */
-static inline int
-ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON,
- (u64) nasid, (u64) subch, intr, 0, 0, 0);
- return (int) rv.v0;
-}
-
-/*
- * Disable the interrupt indicated by the intr parameter (either
- * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
- */
-static inline int
-ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF,
- (u64) nasid, (u64) subch, intr, 0, 0, 0);
- return (int) rv.v0;
-}
-
-/*
- * Set up a node as the point of contact for system controller
- * environmental event delivery.
- */
-static inline int
-ia64_sn_sysctl_event_init(nasid_t nasid)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_EVENT, (u64) nasid,
- 0, 0, 0, 0, 0, 0);
- return (int) rv.v0;
-}
-
-/*
- * Ask the system controller on the specified nasid to reset
- * the CX corelet clock. Only valid on TIO nodes.
- */
-static inline int
-ia64_sn_sysctl_tio_clock_reset(nasid_t nasid)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_TIO_JLCK_RST,
- nasid, 0, 0, 0, 0, 0);
- if (rv.status != 0)
- return (int)rv.status;
- if (rv.v0 != 0)
- return (int)rv.v0;
-
- return 0;
-}
-
-/*
- * Get the associated ioboard type for a given nasid.
- */
-static inline s64
-ia64_sn_sysctl_ioboard_get(nasid_t nasid, u16 *ioboard)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL_REENTRANT(isrv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_IOBOARD,
- nasid, 0, 0, 0, 0, 0);
- if (isrv.v0 != 0) {
- *ioboard = isrv.v0;
- return isrv.status;
- }
- if (isrv.v1 != 0) {
- *ioboard = isrv.v1;
- return isrv.status;
- }
-
- return isrv.status;
-}
-
-/**
- * ia64_sn_get_fit_compt - read a FIT entry from the PROM header
- * @nasid: NASID of node to read
- * @index: FIT entry index to be retrieved (0..n)
- * @fitentry: 16 byte buffer where FIT entry will be stored.
- * @banbuf: optional buffer for retrieving banner
- * @banlen: length of banner buffer
- *
- * Access to the physical PROM chips needs to be serialized since reads and
- * writes can't occur at the same time, so we need to call into the SAL when
- * we want to look at the FIT entries on the chips.
- *
- * Returns:
- * %SALRET_OK if ok
- * %SALRET_INVALID_ARG if index too big
- * %SALRET_NOT_IMPLEMENTED if running on older PROM
- * ??? if nasid invalid OR banner buffer not large enough
- */
-static inline int
-ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf,
- u64 banlen)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry,
- banbuf, banlen, 0, 0);
- return (int) rv.status;
-}
-
-/*
- * Initialize the SAL components of the system controller
- * communication driver; specifically pass in a sizable buffer that
- * can be used for allocation of subchannel queues as new subchannels
- * are opened. "buf" points to the buffer, and "len" specifies its
- * length.
- */
-static inline int
-ia64_sn_irtr_init(nasid_t nasid, void *buf, int len)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT,
- (u64) nasid, (u64) buf, (u64) len, 0, 0, 0);
- return (int) rv.status;
-}
-
-/*
- * Returns the nasid, subnode & slice corresponding to a SAPIC ID
- *
- * In:
- * arg0 - SN_SAL_GET_SAPIC_INFO
- * arg1 - sapicid (lid >> 16)
- * Out:
- * v0 - nasid
- * v1 - subnode
- * v2 - slice
- */
-static inline u64
-ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0);
-
-/***** BEGIN HACK - temp til old proms no longer supported ********/
- if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
- if (nasid) *nasid = sapicid & 0xfff;
- if (subnode) *subnode = (sapicid >> 13) & 1;
- if (slice) *slice = (sapicid >> 12) & 3;
- return 0;
- }
-/***** END HACK *******/
-
- if (ret_stuff.status < 0)
- return ret_stuff.status;
-
- if (nasid) *nasid = (int) ret_stuff.v0;
- if (subnode) *subnode = (int) ret_stuff.v1;
- if (slice) *slice = (int) ret_stuff.v2;
- return 0;
-}
-
-/*
- * Returns information about the HUB/SHUB.
- * In:
- * arg0 - SN_SAL_GET_SN_INFO
- * arg1 - 0 (other values reserved for future use)
- * Out:
- * v0
- * [7:0] - shub type (0=shub1, 1=shub2)
- * [15:8] - Log2 max number of nodes in entire system (includes
- * C-bricks, I-bricks, etc)
- * [23:16] - Log2 of nodes per sharing domain
- * [31:24] - partition ID
- * [39:32] - coherency_id
- * [47:40] - regionsize
- * v1
- * [15:0] - nasid mask (e.g., 0x7ff for an 11-bit nasid)
- * [23:16] - bit position of low nasid bit
- */
-static inline u64
-ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift,
- u8 *systemsize, u8 *sharing_domain_size, u8 *partid, u8 *coher, u8 *reg)
-{
- struct ia64_sal_retval ret_stuff;
-
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
- ret_stuff.v1 = 0;
- ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0);
-
-/***** BEGIN HACK - temp til old proms no longer supported ********/
- if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
- int nasid = get_sapicid() & 0xfff;
-#define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL
-#define SH_SHUB_ID_NODES_PER_BIT_SHFT 48
- if (shubtype) *shubtype = 0;
- if (nasid_bitmask) *nasid_bitmask = 0x7ff;
- if (nasid_shift) *nasid_shift = 38;
- if (systemsize) *systemsize = 10;
- if (sharing_domain_size) *sharing_domain_size = 8;
- if (partid) *partid = ia64_sn_sysctl_partition_get(nasid);
- if (coher) *coher = nasid >> 9;
- if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >>
- SH_SHUB_ID_NODES_PER_BIT_SHFT;
- return 0;
- }
-/***** END HACK *******/
-
- if (ret_stuff.status < 0)
- return ret_stuff.status;
-
- if (shubtype) *shubtype = ret_stuff.v0 & 0xff;
- if (systemsize) *systemsize = (ret_stuff.v0 >> 8) & 0xff;
- if (sharing_domain_size) *sharing_domain_size = (ret_stuff.v0 >> 16) & 0xff;
- if (partid) *partid = (ret_stuff.v0 >> 24) & 0xff;
- if (coher) *coher = (ret_stuff.v0 >> 32) & 0xff;
- if (reg) *reg = (ret_stuff.v0 >> 40) & 0xff;
- if (nasid_bitmask) *nasid_bitmask = (ret_stuff.v1 & 0xffff);
- if (nasid_shift) *nasid_shift = (ret_stuff.v1 >> 16) & 0xff;
- return 0;
-}
-
-/*
- * This is the access point to the Altix PROM hardware performance
- * and status monitoring interface. For info on using this, see
- * include/asm-ia64/sn/sn2/sn_hwperf.h
- */
-static inline int
-ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
- u64 a3, u64 a4, int *v0)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid,
- opcode, a0, a1, a2, a3, a4);
- if (v0)
- *v0 = (int) rv.v0;
- return (int) rv.status;
-}
-
-static inline int
-ia64_sn_ioif_get_pci_topology(u64 buf, u64 len)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY, buf, len, 0, 0, 0, 0, 0);
- return (int) rv.status;
-}
-
-/*
- * BTE error recovery is implemented in SAL
- */
-static inline int
-ia64_sn_bte_recovery(nasid_t nasid)
-{
- struct ia64_sal_retval rv;
-
- rv.status = 0;
- SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, (u64)nasid, 0, 0, 0, 0, 0, 0);
- if (rv.status == SALRET_NOT_IMPLEMENTED)
- return 0;
- return (int) rv.status;
-}
-
-static inline int
-ia64_sn_is_fake_prom(void)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_NOLOCK(rv, SN_SAL_FAKE_PROM, 0, 0, 0, 0, 0, 0, 0);
- return (rv.status == 0);
-}
-
-static inline int
-ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set)
-{
- struct ia64_sal_retval rv;
-
- SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0);
- if (rv.status != 0)
- return rv.status;
- *feature_set = rv.v0;
- return 0;
-}
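
A hedged usage sketch; the bit assignments within a feature set are PROM-defined, so PRF_EXAMPLE_BIT below is a placeholder, not a real flag name:

    #define PRF_EXAMPLE_BIT 0       /* placeholder; real bits are PROM-defined */

    static int prom_has_example_feature(void)
    {
            unsigned long features;

            /* Probe feature set 0 and test one (placeholder) bit. */
            if (ia64_sn_get_prom_feature_set(0, &features) != 0)
                    return 0;
            return (features & (1UL << PRF_EXAMPLE_BIT)) != 0;
    }
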
-
-static inline int
-ia64_sn_set_os_feature(int feature)
-{
- struct ia64_sal_retval rv;
-
- SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0);
- return rv.status;
-}
-
-static inline int
-sn_inject_error(u64 paddr, u64 *data, u64 *ecc)
-{
- struct ia64_sal_retval ret_stuff;
-
- ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data,
- (u64)ecc, 0, 0, 0, 0);
- return ret_stuff.status;
-}
-
-static inline int
-ia64_sn_set_cpu_number(int cpu)
-{
- struct ia64_sal_retval rv;
-
- SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
- return rv.status;
-}
-static inline int
-ia64_sn_kernel_launch_event(void)
-{
- struct ia64_sal_retval rv;
- SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
- return rv.status;
-}
-#endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/tiocp.h b/xen/include/asm-ia64/linux/asm/sn/tiocp.h
deleted file mode 100644
index e8ad0bb5b6..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/tiocp.h
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PCI_TIOCP_H
-#define _ASM_IA64_SN_PCI_TIOCP_H
-
-#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL
-#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
-#define TIOCP_PCI64_CMDTYPE_MSI (0x3ull << 60)
-
-
-/*****************************************************************************
- *********************** TIOCP MMR structure mapping ***************************
- *****************************************************************************/
-
-struct tiocp{
-
- /* 0x000000-0x00FFFF -- Local Registers */
-
- /* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
- u64 cp_id; /* 0x000000 */
- u64 cp_stat; /* 0x000008 */
- u64 cp_err_upper; /* 0x000010 */
- u64 cp_err_lower; /* 0x000018 */
- #define cp_err cp_err_lower
- u64 cp_control; /* 0x000020 */
- u64 cp_req_timeout; /* 0x000028 */
- u64 cp_intr_upper; /* 0x000030 */
- u64 cp_intr_lower; /* 0x000038 */
- #define cp_intr cp_intr_lower
- u64 cp_err_cmdword; /* 0x000040 */
- u64 _pad_000048; /* 0x000048 */
- u64 cp_tflush; /* 0x000050 */
-
- /* 0x000058-0x00007F -- Bridge-specific Configuration */
- u64 cp_aux_err; /* 0x000058 */
- u64 cp_resp_upper; /* 0x000060 */
- u64 cp_resp_lower; /* 0x000068 */
- #define cp_resp cp_resp_lower
- u64 cp_tst_pin_ctrl; /* 0x000070 */
- u64 cp_addr_lkerr; /* 0x000078 */
-
- /* 0x000080-0x00008F -- PMU & MAP */
- u64 cp_dir_map; /* 0x000080 */
- u64 _pad_000088; /* 0x000088 */
-
- /* 0x000090-0x00009F -- SSRAM */
- u64 cp_map_fault; /* 0x000090 */
- u64 _pad_000098; /* 0x000098 */
-
- /* 0x0000A0-0x0000AF -- Arbitration */
- u64 cp_arb; /* 0x0000A0 */
- u64 _pad_0000A8; /* 0x0000A8 */
-
- /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
- u64 cp_ate_parity_err; /* 0x0000B0 */
- u64 _pad_0000B8; /* 0x0000B8 */
-
- /* 0x0000C0-0x0000FF -- PCI/GIO */
- u64 cp_bus_timeout; /* 0x0000C0 */
- u64 cp_pci_cfg; /* 0x0000C8 */
- u64 cp_pci_err_upper; /* 0x0000D0 */
- u64 cp_pci_err_lower; /* 0x0000D8 */
- #define cp_pci_err cp_pci_err_lower
- u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */
-
- /* 0x000100-0x0001FF -- Interrupt */
- u64 cp_int_status; /* 0x000100 */
- u64 cp_int_enable; /* 0x000108 */
- u64 cp_int_rst_stat; /* 0x000110 */
- u64 cp_int_mode; /* 0x000118 */
- u64 cp_int_device; /* 0x000120 */
- u64 cp_int_host_err; /* 0x000128 */
- u64 cp_int_addr[8]; /* 0x0001{30,,,68} */
- u64 cp_err_int_view; /* 0x000170 */
- u64 cp_mult_int; /* 0x000178 */
- u64 cp_force_always[8]; /* 0x0001{80,,,B8} */
- u64 cp_force_pin[8]; /* 0x0001{C0,,,F8} */
-
- /* 0x000200-0x000298 -- Device */
- u64 cp_device[4]; /* 0x0002{00,,,18} */
- u64 _pad_000220[4]; /* 0x0002{20,,,38} */
- u64 cp_wr_req_buf[4]; /* 0x0002{40,,,58} */
- u64 _pad_000260[4]; /* 0x0002{60,,,78} */
- u64 cp_rrb_map[2]; /* 0x0002{80,,,88} */
- #define cp_even_resp cp_rrb_map[0] /* 0x000280 */
- #define cp_odd_resp cp_rrb_map[1] /* 0x000288 */
- u64 cp_resp_status; /* 0x000290 */
- u64 cp_resp_clear; /* 0x000298 */
-
- u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */
-
- /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
- struct {
- u64 upper; /* 0x0003{00,,,F0} */
- u64 lower; /* 0x0003{08,,,F8} */
- } cp_buf_addr_match[16];
-
- /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
- struct {
- u64 flush_w_touch; /* 0x000{400,,,5C0} */
- u64 flush_wo_touch; /* 0x000{408,,,5C8} */
- u64 inflight; /* 0x000{410,,,5D0} */
- u64 prefetch; /* 0x000{418,,,5D8} */
- u64 total_pci_retry; /* 0x000{420,,,5E0} */
- u64 max_pci_retry; /* 0x000{428,,,5E8} */
- u64 max_latency; /* 0x000{430,,,5F0} */
- u64 clear_all; /* 0x000{438,,,5F8} */
- } cp_buf_count[8];
-
-
- /* 0x000600-0x0009FF -- PCI/X registers */
- u64 cp_pcix_bus_err_addr; /* 0x000600 */
- u64 cp_pcix_bus_err_attr; /* 0x000608 */
- u64 cp_pcix_bus_err_data; /* 0x000610 */
- u64 cp_pcix_pio_split_addr; /* 0x000618 */
- u64 cp_pcix_pio_split_attr; /* 0x000620 */
- u64 cp_pcix_dma_req_err_attr; /* 0x000628 */
- u64 cp_pcix_dma_req_err_addr; /* 0x000630 */
- u64 cp_pcix_timeout; /* 0x000638 */
-
- u64 _pad_000640[24]; /* 0x000{640,,,6F8} */
-
- /* 0x000700-0x000737 -- Debug Registers */
- u64 cp_ct_debug_ctl; /* 0x000700 */
- u64 cp_br_debug_ctl; /* 0x000708 */
- u64 cp_mux3_debug_ctl; /* 0x000710 */
- u64 cp_mux4_debug_ctl; /* 0x000718 */
- u64 cp_mux5_debug_ctl; /* 0x000720 */
- u64 cp_mux6_debug_ctl; /* 0x000728 */
- u64 cp_mux7_debug_ctl; /* 0x000730 */
-
- u64 _pad_000738[89]; /* 0x000{738,,,9F8} */
-
- /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
- struct {
- u64 cp_buf_addr; /* 0x000{A00,,,AF0} */
- u64 cp_buf_attr; /* 0X000{A08,,,AF8} */
- } cp_pcix_read_buf_64[16];
-
- struct {
- u64 cp_buf_addr; /* 0x000{B00,,,BE0} */
- u64 cp_buf_attr; /* 0x000{B08,,,BE8} */
- u64 cp_buf_valid; /* 0x000{B10,,,BF0} */
- u64 __pad1; /* 0x000{B18,,,BF8} */
- } cp_pcix_write_buf_64[8];
-
- /* End of Local Registers -- Start of Address Map space */
-
- char _pad_000c00[0x010000 - 0x000c00];
-
- /* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
- u64 cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */
-
- char _pad_012000[0x14000 - 0x012000];
-
- /* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
- u64 cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */
-
- char _pad_016000[0x18000 - 0x016000];
-
- /* 0x18000-0x197F8 -- TIOCP Write Request Ram */
- u64 cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
- u64 cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
- u64 cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
-
- char _pad_019800[0x1C000 - 0x019800];
-
- /* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
- u64 cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */
- u64 cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */
- u64 cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */
-
- char _pad_01F000[0x20000 - 0x01F000];
-
- /* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used) */
- char _pad_020000[0x021000 - 0x20000];
-
- /* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
- union {
- u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
- u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
- u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
- u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
- union {
- u8 c[0x100 / 1];
- u16 s[0x100 / 2];
- u32 l[0x100 / 4];
- u64 d[0x100 / 8];
- } f[8];
- } cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */
-
- /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
- union {
- u8 c[0x1000 / 1]; /* 0x028000-0x029000 */
- u16 s[0x1000 / 2]; /* 0x028000-0x029000 */
- u32 l[0x1000 / 4]; /* 0x028000-0x029000 */
- u64 d[0x1000 / 8]; /* 0x028000-0x029000 */
- union {
- u8 c[0x100 / 1];
- u16 s[0x100 / 2];
- u32 l[0x100 / 4];
- u64 d[0x100 / 8];
- } f[8];
- } cp_type1_cfg; /* 0x028000-0x029000 */
-
- char _pad_029000[0x030000-0x029000];
-
- /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
- union {
- u8 c[8 / 1];
- u16 s[8 / 2];
- u32 l[8 / 4];
- u64 d[8 / 8];
- } cp_pci_iack; /* 0x030000-0x030007 */
-
- char _pad_030007[0x040000-0x030008];
-
- /* 0x040000-0x040007 -- PCIX Special Cycle */
- union {
- u8 c[8 / 1];
- u16 s[8 / 2];
- u32 l[8 / 4];
- u64 d[8 / 8];
- } cp_pcix_cycle; /* 0x040000-0x040007 */
-
- char _pad_040007[0x200000-0x040008];
-
- /* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */
- union {
- u8 c[0x100000 / 1];
- u16 s[0x100000 / 2];
- u32 l[0x100000 / 4];
- u64 d[0x100000 / 8];
- } cp_devio_raw[6]; /* 0x200000-0x7FFFFF */
-
- #define cp_devio(n) cp_devio_raw[((n)<2)?(n*2):(n+2)]
-
- char _pad_800000[0xA00000-0x800000];
-
- /* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */
- union {
- u8 c[0x100000 / 1];
- u16 s[0x100000 / 2];
- u32 l[0x100000 / 4];
- u64 d[0x100000 / 8];
- } cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */
-
- #define cp_devio_flush(n) cp_devio_raw_flush[((n)<2)?(n*2):(n+2)]
-
-};
-
-#endif /* _ASM_IA64_SN_PCI_TIOCP_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/xbow.h b/xen/include/asm-ia64/linux/asm/sn/xbow.h
deleted file mode 100644
index 90f37a4133..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/xbow.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All Rights
- * Reserved.
- */
-#ifndef _ASM_IA64_SN_XTALK_XBOW_H
-#define _ASM_IA64_SN_XTALK_XBOW_H
-
-#define XBOW_PORT_8 0x8
-#define XBOW_PORT_C 0xc
-#define XBOW_PORT_F 0xf
-
-#define MAX_XBOW_PORTS 8 /* number of ports on xbow chip */
-#define BASE_XBOW_PORT XBOW_PORT_8 /* Lowest external port */
-
-#define XBOW_CREDIT 4
-
-#define MAX_XBOW_NAME 16
-
-/* Register set for each xbow link */
-typedef volatile struct xb_linkregs_s {
-/*
- * we access these through synergy unswizzled space, so the address
- * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
- * That's why we put the register first and filler second.
- */
- u32 link_ibf;
- u32 filler0; /* filler for proper alignment */
- u32 link_control;
- u32 filler1;
- u32 link_status;
- u32 filler2;
- u32 link_arb_upper;
- u32 filler3;
- u32 link_arb_lower;
- u32 filler4;
- u32 link_status_clr;
- u32 filler5;
- u32 link_reset;
- u32 filler6;
- u32 link_aux_status;
- u32 filler7;
-} xb_linkregs_t;
-
-typedef volatile struct xbow_s {
- /* standard widget configuration 0x000000-0x000057 */
- struct widget_cfg xb_widget; /* 0x000000 */
-
- /* helper fieldnames for accessing bridge widget */
-
-#define xb_wid_id xb_widget.w_id
-#define xb_wid_stat xb_widget.w_status
-#define xb_wid_err_upper xb_widget.w_err_upper_addr
-#define xb_wid_err_lower xb_widget.w_err_lower_addr
-#define xb_wid_control xb_widget.w_control
-#define xb_wid_req_timeout xb_widget.w_req_timeout
-#define xb_wid_int_upper xb_widget.w_intdest_upper_addr
-#define xb_wid_int_lower xb_widget.w_intdest_lower_addr
-#define xb_wid_err_cmdword xb_widget.w_err_cmd_word
-#define xb_wid_llp xb_widget.w_llp_cfg
-#define xb_wid_stat_clr xb_widget.w_tflush
-
-/*
- * we access these through synergy unswizzled space, so the address
- * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vice versa).
- * That's why we put the register first and filler second.
- */
- /* xbow-specific widget configuration 0x000058-0x0000FF */
- u32 xb_wid_arb_reload; /* 0x00005C */
- u32 _pad_000058;
- u32 xb_perf_ctr_a; /* 0x000064 */
- u32 _pad_000060;
- u32 xb_perf_ctr_b; /* 0x00006c */
- u32 _pad_000068;
- u32 xb_nic; /* 0x000074 */
- u32 _pad_000070;
-
- /* Xbridge only */
- u32 xb_w0_rst_fnc; /* 0x00007C */
- u32 _pad_000078;
- u32 xb_l8_rst_fnc; /* 0x000084 */
- u32 _pad_000080;
- u32 xb_l9_rst_fnc; /* 0x00008c */
- u32 _pad_000088;
- u32 xb_la_rst_fnc; /* 0x000094 */
- u32 _pad_000090;
- u32 xb_lb_rst_fnc; /* 0x00009c */
- u32 _pad_000098;
- u32 xb_lc_rst_fnc; /* 0x0000a4 */
- u32 _pad_0000a0;
- u32 xb_ld_rst_fnc; /* 0x0000ac */
- u32 _pad_0000a8;
- u32 xb_le_rst_fnc; /* 0x0000b4 */
- u32 _pad_0000b0;
- u32 xb_lf_rst_fnc; /* 0x0000bc */
- u32 _pad_0000b8;
- u32 xb_lock; /* 0x0000c4 */
- u32 _pad_0000c0;
- u32 xb_lock_clr; /* 0x0000cc */
- u32 _pad_0000c8;
- /* end of Xbridge only */
- u32 _pad_0000d0[12];
-
- /* Link Specific Registers, port 8..15 0x000100-0x000300 */
- xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS];
-} xbow_t;
-
-#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
-
-#define XB_FLAGS_EXISTS 0x1 /* device exists */
-#define XB_FLAGS_MASTER 0x2
-#define XB_FLAGS_SLAVE 0x0
-#define XB_FLAGS_GBR 0x4
-#define XB_FLAGS_16BIT 0x8
-#define XB_FLAGS_8BIT 0x0
-
-/* is widget port number valid? (based on version 7.0 of xbow spec) */
-#define XBOW_WIDGET_IS_VALID(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_F)
-
-/* whether to use upper or lower arbitration register, given source widget id */
-#define XBOW_ARB_IS_UPPER(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_B)
-#define XBOW_ARB_IS_LOWER(wid) ((wid) >= XBOW_PORT_C && (wid) <= XBOW_PORT_F)
-
-/* offset of arbitration register, given source widget id */
-#define XBOW_ARB_OFF(wid) (XBOW_ARB_IS_UPPER(wid) ? 0x1c : 0x24)
-
-#define XBOW_WID_ID WIDGET_ID
-#define XBOW_WID_STAT WIDGET_STATUS
-#define XBOW_WID_ERR_UPPER WIDGET_ERR_UPPER_ADDR
-#define XBOW_WID_ERR_LOWER WIDGET_ERR_LOWER_ADDR
-#define XBOW_WID_CONTROL WIDGET_CONTROL
-#define XBOW_WID_REQ_TO WIDGET_REQ_TIMEOUT
-#define XBOW_WID_INT_UPPER WIDGET_INTDEST_UPPER_ADDR
-#define XBOW_WID_INT_LOWER WIDGET_INTDEST_LOWER_ADDR
-#define XBOW_WID_ERR_CMDWORD WIDGET_ERR_CMD_WORD
-#define XBOW_WID_LLP WIDGET_LLP_CFG
-#define XBOW_WID_STAT_CLR WIDGET_TFLUSH
-#define XBOW_WID_ARB_RELOAD 0x5c
-#define XBOW_WID_PERF_CTR_A 0x64
-#define XBOW_WID_PERF_CTR_B 0x6c
-#define XBOW_WID_NIC 0x74
-
-/* Xbridge only */
-#define XBOW_W0_RST_FNC 0x00007C
-#define XBOW_L8_RST_FNC 0x000084
-#define XBOW_L9_RST_FNC 0x00008c
-#define XBOW_LA_RST_FNC 0x000094
-#define XBOW_LB_RST_FNC 0x00009c
-#define XBOW_LC_RST_FNC 0x0000a4
-#define XBOW_LD_RST_FNC 0x0000ac
-#define XBOW_LE_RST_FNC 0x0000b4
-#define XBOW_LF_RST_FNC 0x0000bc
-#define XBOW_RESET_FENCE(x) ((x) > 7 && (x) < 16) ? \
- (XBOW_W0_RST_FNC + ((x) - 7) * 8) : \
- ((x) == 0) ? XBOW_W0_RST_FNC : 0
-#define XBOW_LOCK 0x0000c4
-#define XBOW_LOCK_CLR 0x0000cc
-/* End of Xbridge only */
-
-/* used only in ide, but defined here within the reserved portion */
-/* of the widget0 address space (before 0xf4) */
-#define XBOW_WID_UNDEF 0xe4
-
-/* xbow link register set base, legal value for x is 0x8..0xf */
-#define XB_LINK_BASE 0x100
-#define XB_LINK_OFFSET 0x40
-#define XB_LINK_REG_BASE(x) (XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET)
-
-#define XB_LINK_IBUF_FLUSH(x) (XB_LINK_REG_BASE(x) + 0x4)
-#define XB_LINK_CTRL(x) (XB_LINK_REG_BASE(x) + 0xc)
-#define XB_LINK_STATUS(x) (XB_LINK_REG_BASE(x) + 0x14)
-#define XB_LINK_ARB_UPPER(x) (XB_LINK_REG_BASE(x) + 0x1c)
-#define XB_LINK_ARB_LOWER(x) (XB_LINK_REG_BASE(x) + 0x24)
-#define XB_LINK_STATUS_CLR(x) (XB_LINK_REG_BASE(x) + 0x2c)
-#define XB_LINK_RESET(x) (XB_LINK_REG_BASE(x) + 0x34)
-#define XB_LINK_AUX_STATUS(x) (XB_LINK_REG_BASE(x) + 0x3c)
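
A worked offset, for illustration: for port 0xa, XB_LINK_REG_BASE(0xa) = 0x100 + (0xa & 7) * 0x40 = 0x180, so XB_LINK_CTRL(0xa) resolves to 0x18c and XB_LINK_STATUS(0xa) to 0x194. The 0x4 skew relative to the xb_linkregs_t struct layout is the address swizzling described in the comment above.
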
-
-/* link_control(x) */
-#define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */
-/* reserved: 0x40000000 */
-#define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */
-#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer
- level */
-#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8
- bit mode */
-#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP
- packet */
-#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit
- mask */
-#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit
- shift */
-#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination
- */
-#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input
- buffer */
-/* reserved: 0x0000fe00 */
-#define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */
-#define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */
-#define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */
-#define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */
-#define XB_CTRL_RCV_IE 0x00000010 /* receive */
-#define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */
-/* reserved: 0x00000004 */
-#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request
- timeout */
-#define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */
-
-/* link_status(x) */
-#define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE
-/* reserved: 0x7ff80000 */
-#define XB_STAT_MULTI_ERR 0x00040000 /* multi error */
-#define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE
-#define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE
-#define XB_STAT_BNDWDTH_ALLOC_ID_MSK 0x0000ff00 /* port bitmask */
-#define XB_STAT_RCV_CNT_OFLOW_ERR XB_CTRL_RCV_CNT_OFLOW_IE
-#define XB_STAT_XMT_CNT_OFLOW_ERR XB_CTRL_XMT_CNT_OFLOW_IE
-#define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE
-#define XB_STAT_RCV_ERR XB_CTRL_RCV_IE
-#define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE
-/* reserved: 0x00000004 */
-#define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE
-#define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE
-
-/* link_aux_status(x) */
-#define XB_AUX_STAT_RCV_CNT 0xff000000
-#define XB_AUX_STAT_XMT_CNT 0x00ff0000
-#define XB_AUX_STAT_TOUT_DST 0x0000ff00
-#define XB_AUX_LINKFAIL_RST_BAD 0x00000040
-#define XB_AUX_STAT_PRESENT 0x00000020
-#define XB_AUX_STAT_PORT_WIDTH 0x00000010
-/* reserved: 0x0000000f */
-
-/*
- * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper
- * register if (x) is 0x8..0xb, link_arb_lower if (x) is 0xc..0xf
- */
-#define XB_ARB_GBR_MSK 0x1f
-#define XB_ARB_RR_MSK 0x7
-#define XB_ARB_GBR_SHFT(x) (((x) & 0x3) * 8)
-#define XB_ARB_RR_SHFT(x) (((x) & 0x3) * 8 + 5)
-#define XB_ARB_GBR_CNT(reg,x) ((reg) >> XB_ARB_GBR_SHFT(x) & XB_ARB_GBR_MSK)
-#define XB_ARB_RR_CNT(reg,x) ((reg) >> XB_ARB_RR_SHFT(x) & XB_ARB_RR_MSK)
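
Worked example: for source widget 0x9 (an upper-register port), XB_ARB_GBR_SHFT(0x9) = (0x9 & 3) * 8 = 8 and XB_ARB_RR_SHFT(0x9) = 13, so XB_ARB_GBR_CNT(reg, 0x9) extracts bits [12:8] and XB_ARB_RR_CNT(reg, 0x9) extracts bits [15:13] of the link_arb_upper register.
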
-
-/* XBOW_WID_STAT */
-#define XB_WID_STAT_LINK_INTR_SHFT (24)
-#define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT)
-#define XB_WID_STAT_LINK_INTR(x) \
- (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
-#define XB_WID_STAT_WIDGET0_INTR 0x00800000
-#define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */
-#define XB_WID_STAT_REG_ACC_ERR 0x00000020
-#define XB_WID_STAT_RECV_TOUT 0x00000010 /* Xbridge only */
-#define XB_WID_STAT_ARB_TOUT 0x00000008 /* Xbridge only */
-#define XB_WID_STAT_XTALK_ERR 0x00000004
-#define XB_WID_STAT_DST_TOUT 0x00000002 /* Xbridge only */
-#define XB_WID_STAT_MULTI_ERR 0x00000001
-
-#define XB_WID_STAT_SRCID_SHFT 6
-
-/* XBOW_WID_CONTROL */
-#define XB_WID_CTRL_REG_ACC_IE XB_WID_STAT_REG_ACC_ERR
-#define XB_WID_CTRL_RECV_TOUT XB_WID_STAT_RECV_TOUT
-#define XB_WID_CTRL_ARB_TOUT XB_WID_STAT_ARB_TOUT
-#define XB_WID_CTRL_XTALK_IE XB_WID_STAT_XTALK_ERR
-
-/* XBOW_WID_INT_UPPER */
-/* defined in xwidget.h for WIDGET_INTDEST_UPPER_ADDR */
-
-/* XBOW WIDGET part number, in the ID register */
-#define XBOW_WIDGET_PART_NUM 0x0 /* crossbow */
-#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */
-#define XBOW_WIDGET_MFGR_NUM 0x0
-#define XXBOW_WIDGET_MFGR_NUM 0x0
-#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */
-
-#define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */
-#define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */
-#define XBOW_REV_1_2 0x3 /* xbow rev 1.2 is "3" */
-#define XBOW_REV_1_3 0x4 /* xbow rev 1.3 is "4" */
-#define XBOW_REV_2_0 0x5 /* xbow rev 2.0 is "5" */
-
-#define XXBOW_PART_REV_1_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x1 )
-#define XXBOW_PART_REV_2_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x2 )
-
-/* XBOW_WID_ARB_RELOAD */
-#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */
-
-#define IS_XBRIDGE_XBOW(wid) \
- (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
- XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
-
-#define IS_PIC_XBOW(wid) \
- (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \
- XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
-
-#define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv)
-
-#endif /* _ASM_IA64_SN_XTALK_XBOW_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/xwidgetdev.h b/xen/include/asm-ia64/linux/asm/sn/xwidgetdev.h
deleted file mode 100644
index 2800eda0fd..0000000000
--- a/xen/include/asm-ia64/linux/asm/sn/xwidgetdev.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
- */
-#ifndef _ASM_IA64_SN_XTALK_XWIDGET_H
-#define _ASM_IA64_SN_XTALK_XWIDGET_H
-
-/* WIDGET_ID */
-#define WIDGET_REV_NUM 0xf0000000
-#define WIDGET_PART_NUM 0x0ffff000
-#define WIDGET_MFG_NUM 0x00000ffe
-#define WIDGET_REV_NUM_SHFT 28
-#define WIDGET_PART_NUM_SHFT 12
-#define WIDGET_MFG_NUM_SHFT 1
-
-#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
-#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
-#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)
-#define XWIDGET_PART_REV_NUM(widgetid) ((XWIDGET_PART_NUM(widgetid) << 4) | \
- XWIDGET_REV_NUM(widgetid))
-#define XWIDGET_PART_REV_NUM_REV(partrev) (partrev & 0xf)
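
Worked decode, for illustration: a widget ID of 0x2d000000 yields XWIDGET_REV_NUM = 0x2, XWIDGET_PART_NUM = 0xd000 and XWIDGET_MFG_NUM = 0x0, so XWIDGET_PART_REV_NUM = 0xd0002 -- an Xbridge, rev 2.0 (compare XXBOW_PART_REV_2_0 in xbow.h).
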
-
-/* widget configuration registers */
-struct widget_cfg{
- u32 w_id; /* 0x04 */
- u32 w_pad_0; /* 0x00 */
- u32 w_status; /* 0x0c */
- u32 w_pad_1; /* 0x08 */
- u32 w_err_upper_addr; /* 0x14 */
- u32 w_pad_2; /* 0x10 */
- u32 w_err_lower_addr; /* 0x1c */
- u32 w_pad_3; /* 0x18 */
- u32 w_control; /* 0x24 */
- u32 w_pad_4; /* 0x20 */
- u32 w_req_timeout; /* 0x2c */
- u32 w_pad_5; /* 0x28 */
- u32 w_intdest_upper_addr; /* 0x34 */
- u32 w_pad_6; /* 0x30 */
- u32 w_intdest_lower_addr; /* 0x3c */
- u32 w_pad_7; /* 0x38 */
- u32 w_err_cmd_word; /* 0x44 */
- u32 w_pad_8; /* 0x40 */
- u32 w_llp_cfg; /* 0x4c */
- u32 w_pad_9; /* 0x48 */
- u32 w_tflush; /* 0x54 */
- u32 w_pad_10; /* 0x50 */
-};
-
-/*
- * Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec.
- */
-struct xwidget_hwid{
- int mfg_num;
- int rev_num;
- int part_num;
-};
-
-struct xwidget_info{
-
- struct xwidget_hwid xwi_hwid; /* Widget Identification */
- char xwi_masterxid; /* Hub's Widget Port Number */
- void *xwi_hubinfo; /* Hub's provider private info */
- u64 *xwi_hub_provider; /* prom provider functions */
- void *xwi_vertex;
-};
-
-#endif /* _ASM_IA64_SN_XTALK_XWIDGET_H */
diff --git a/xen/include/asm-ia64/linux/asm/string.h b/xen/include/asm-ia64/linux/asm/string.h
deleted file mode 100644
index 43502d3b57..0000000000
--- a/xen/include/asm-ia64/linux/asm/string.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_IA64_STRING_H
-#define _ASM_IA64_STRING_H
-
-/*
- * Here is where we want to put optimized versions of the string
- * routines.
- *
- * Copyright (C) 1998-2000, 2002 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/config.h> /* remove this once we remove the A-step workaround... */
-
-#define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */
-#define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */
-#define __HAVE_ARCH_MEMCPY 1 /* see arch/ia64/lib/memcpy.S */
-
-extern __kernel_size_t strlen (const char *);
-extern void *memcpy (void *, const void *, __kernel_size_t);
-extern void *memset (void *, int, __kernel_size_t);
-
-#endif /* _ASM_IA64_STRING_H */
diff --git a/xen/include/asm-ia64/linux/asm/thread_info.h b/xen/include/asm-ia64/linux/asm/thread_info.h
deleted file mode 100644
index 7dc8951708..0000000000
--- a/xen/include/asm-ia64/linux/asm/thread_info.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-#ifndef _ASM_IA64_THREAD_INFO_H
-#define _ASM_IA64_THREAD_INFO_H
-
-#include <asm/offsets.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-
-#define PREEMPT_ACTIVE_BIT 30
-#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
-
-#ifndef __ASSEMBLY__
-
-/*
- * On IA-64, we want to keep the task structure and kernel stack together, so they can be
- * mapped by a single TLB entry and so they can be addressed by the "current" pointer
- * without having to do pointer masking.
- */
-struct thread_info {
- struct task_struct *task; /* XXX not really needed, except for dup_task_struct() */
- struct exec_domain *exec_domain;/* execution domain */
- __u32 flags; /* thread_info flags (see TIF_*) */
- __u32 cpu; /* current CPU */
- mm_segment_t addr_limit; /* user-level address space limit */
- int preempt_count; /* 0=preemptable, <0=BUG; will also serve as bh-counter */
- struct restart_block restart_block;
- struct {
- int signo;
- int code;
- void __user *addr;
- unsigned long start_time;
- pid_t pid;
- } sigdelayed; /* Saved information for TIF_SIGDELAYED */
-};
-
-#define THREAD_SIZE KERNEL_STACK_SIZE
-
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .task = &tsk, \
- .exec_domain = &default_exec_domain, \
- .flags = 0, \
- .cpu = 0, \
- .addr_limit = KERNEL_DS, \
- .preempt_count = 0, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
-}
-
-/* how to get the thread information struct from C */
-#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
-#define free_thread_info(ti) /* nothing */
-
-#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER))
-#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
-
-#endif /* !__ASSEMBLY */
-
-/*
- * thread information flags
- * - these are process state flags that various assembly files may need to access
- * - pending work-to-be-done flags are in least-significant 16 bits, other flags
- * in top 16 bits
- */
-#define TIF_NOTIFY_RESUME 0 /* resumption notification requested */
-#define TIF_SIGPENDING 1 /* signal pending */
-#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
-#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
-#define TIF_SIGDELAYED 5 /* signal delayed from MCA/INIT/NMI/PMI context */
-#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 17
-
-#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
-#define _TIF_SIGDELAYED (1 << TIF_SIGDELAYED)
-#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-
-/* "work to do on user-return" bits */
-#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
-/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
-#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
-
-#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/xen/include/asm-ia64/linux/asm/timex.h b/xen/include/asm-ia64/linux/asm/timex.h
deleted file mode 100644
index 414aae0604..0000000000
--- a/xen/include/asm-ia64/linux/asm/timex.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef _ASM_IA64_TIMEX_H
-#define _ASM_IA64_TIMEX_H
-
-/*
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-/*
- * 2001/01/18 davidm Removed CLOCK_TICK_RATE. It makes no sense on IA-64.
- * Also removed cacheflush_time as it's entirely unused.
- */
-
-#include <asm/intrinsics.h>
-#include <asm/processor.h>
-
-typedef unsigned long cycles_t;
-
-/*
- * For performance reasons, we don't want to define CLOCK_TICK_RATE as
- * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George
- * Anzinger, 1/CLOCK_TICK_RATE is taken as the resolution of the timer clock. The time
- * calculation assumes that you will use enough of these so that your tick size <= 1/HZ.
- * If the calculation shows that your CLOCK_TICK_RATE cannot supply exactly 1/HZ ticks,
- * the actual value is calculated and used to update the wall clock each jiffy. Setting
- * the CLOCK_TICK_RATE to x*HZ ensures that the calculation will find no errors. Hence we
- * pick a multiple of HZ which gives us a (totally virtual) CLOCK_TICK_RATE of about
- * 100MHz.
- */
-#define CLOCK_TICK_RATE (HZ * 100000UL)
-
-static inline cycles_t
-get_cycles (void)
-{
- cycles_t ret;
-
- ret = ia64_getreg(_IA64_REG_AR_ITC);
- return ret;
-}
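
An interval-measurement sketch (editorial; ignores counter wraparound and assumes the caller converts ticks to time via local_cpu_data->itc_rate, as the comment above notes):

    cycles_t start, elapsed;

    start = get_cycles();
    /* ... code being timed ... */
    elapsed = get_cycles() - start;   /* elapsed ITC ticks, not seconds */
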
-
-#endif /* _ASM_IA64_TIMEX_H */
diff --git a/xen/include/asm-ia64/linux/asm/topology.h b/xen/include/asm-ia64/linux/asm/topology.h
deleted file mode 100644
index 399bc29729..0000000000
--- a/xen/include/asm-ia64/linux/asm/topology.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * linux/include/asm-ia64/topology.h
- *
- * Copyright (C) 2002, Erich Focht, NEC
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef _ASM_IA64_TOPOLOGY_H
-#define _ASM_IA64_TOPOLOGY_H
-
-#include <asm/acpi.h>
-#include <asm/numa.h>
-#include <asm/smp.h>
-
-#ifdef CONFIG_NUMA
-/*
- * Returns the number of the node containing CPU 'cpu'
- */
-#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu])
-
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- */
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
-
-/*
- * Returns the number of the node containing Node 'nid'.
- * Not implemented here. Multi-level hierarchies detected with
- * the help of node_distance().
- */
-#define parent_node(nid) (nid)
-
-/*
- * Returns the number of the first CPU on Node 'node'.
- */
-#define node_to_first_cpu(node) (__ffs(node_to_cpumask(node)))
-
-/*
- * Determines the node for a given pci bus
- */
-#define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node
-
-void build_cpu_to_node_map(void);
-
-#define SD_CPU_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .groups = NULL, \
- .min_interval = 1, \
- .max_interval = 4, \
- .busy_factor = 64, \
- .imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
- .per_cpu_gain = 100, \
- .cache_nice_tries = 2, \
- .busy_idx = 2, \
- .idle_idx = 1, \
- .newidle_idx = 2, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_NEWIDLE \
- | SD_BALANCE_EXEC \
- | SD_WAKE_AFFINE, \
- .last_balance = jiffies, \
- .balance_interval = 1, \
- .nr_balance_failed = 0, \
-}
-
-/* sched_domains SD_NODE_INIT for IA64 NUMA machines */
-#define SD_NODE_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .groups = NULL, \
- .min_interval = 8, \
- .max_interval = 8*(min(num_online_cpus(), 32)), \
- .busy_factor = 64, \
- .imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
- .cache_nice_tries = 2, \
- .busy_idx = 3, \
- .idle_idx = 2, \
- .newidle_idx = 0, /* unused */ \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
- .per_cpu_gain = 100, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_EXEC \
- | SD_BALANCE_FORK \
- | SD_WAKE_BALANCE, \
- .last_balance = jiffies, \
- .balance_interval = 64, \
- .nr_balance_failed = 0, \
-}
-
-/* sched_domains SD_ALLNODES_INIT for IA64 NUMA machines */
-#define SD_ALLNODES_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .groups = NULL, \
- .min_interval = 64, \
- .max_interval = 64*num_online_cpus(), \
- .busy_factor = 128, \
- .imbalance_pct = 133, \
- .cache_hot_time = (10*1000000), \
- .cache_nice_tries = 1, \
- .busy_idx = 3, \
- .idle_idx = 3, \
- .newidle_idx = 0, /* unused */ \
- .wake_idx = 0, /* unused */ \
- .forkexec_idx = 0, /* unused */ \
- .per_cpu_gain = 100, \
- .flags = SD_LOAD_BALANCE, \
- .last_balance = jiffies, \
- .balance_interval = 64, \
- .nr_balance_failed = 0, \
-}
-
-#endif /* CONFIG_NUMA */
-
-#include <asm-generic/topology.h>
-
-#endif /* _ASM_IA64_TOPOLOGY_H */
diff --git a/xen/include/asm-ia64/linux/asm/unaligned.h b/xen/include/asm-ia64/linux/asm/unaligned.h
deleted file mode 100644
index bb85598881..0000000000
--- a/xen/include/asm-ia64/linux/asm/unaligned.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_IA64_UNALIGNED_H
-#define _ASM_IA64_UNALIGNED_H
-
-#include <asm-generic/unaligned.h>
-
-#endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/xen/include/asm-ia64/linux/asm/unistd.h b/xen/include/asm-ia64/linux/asm/unistd.h
deleted file mode 100644
index 3a0c695246..0000000000
--- a/xen/include/asm-ia64/linux/asm/unistd.h
+++ /dev/null
@@ -1,405 +0,0 @@
-#ifndef _ASM_IA64_UNISTD_H
-#define _ASM_IA64_UNISTD_H
-
-/*
- * IA-64 Linux syscall numbers and inline-functions.
- *
- * Copyright (C) 1998-2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <asm/break.h>
-
-#define __BREAK_SYSCALL __IA64_BREAK_SYSCALL
-
-#define __NR_ni_syscall 1024
-#define __NR_exit 1025
-#define __NR_read 1026
-#define __NR_write 1027
-#define __NR_open 1028
-#define __NR_close 1029
-#define __NR_creat 1030
-#define __NR_link 1031
-#define __NR_unlink 1032
-#define __NR_execve 1033
-#define __NR_chdir 1034
-#define __NR_fchdir 1035
-#define __NR_utimes 1036
-#define __NR_mknod 1037
-#define __NR_chmod 1038
-#define __NR_chown 1039
-#define __NR_lseek 1040
-#define __NR_getpid 1041
-#define __NR_getppid 1042
-#define __NR_mount 1043
-#define __NR_umount 1044
-#define __NR_setuid 1045
-#define __NR_getuid 1046
-#define __NR_geteuid 1047
-#define __NR_ptrace 1048
-#define __NR_access 1049
-#define __NR_sync 1050
-#define __NR_fsync 1051
-#define __NR_fdatasync 1052
-#define __NR_kill 1053
-#define __NR_rename 1054
-#define __NR_mkdir 1055
-#define __NR_rmdir 1056
-#define __NR_dup 1057
-#define __NR_pipe 1058
-#define __NR_times 1059
-#define __NR_brk 1060
-#define __NR_setgid 1061
-#define __NR_getgid 1062
-#define __NR_getegid 1063
-#define __NR_acct 1064
-#define __NR_ioctl 1065
-#define __NR_fcntl 1066
-#define __NR_umask 1067
-#define __NR_chroot 1068
-#define __NR_ustat 1069
-#define __NR_dup2 1070
-#define __NR_setreuid 1071
-#define __NR_setregid 1072
-#define __NR_getresuid 1073
-#define __NR_setresuid 1074
-#define __NR_getresgid 1075
-#define __NR_setresgid 1076
-#define __NR_getgroups 1077
-#define __NR_setgroups 1078
-#define __NR_getpgid 1079
-#define __NR_setpgid 1080
-#define __NR_setsid 1081
-#define __NR_getsid 1082
-#define __NR_sethostname 1083
-#define __NR_setrlimit 1084
-#define __NR_getrlimit 1085
-#define __NR_getrusage 1086
-#define __NR_gettimeofday 1087
-#define __NR_settimeofday 1088
-#define __NR_select 1089
-#define __NR_poll 1090
-#define __NR_symlink 1091
-#define __NR_readlink 1092
-#define __NR_uselib 1093
-#define __NR_swapon 1094
-#define __NR_swapoff 1095
-#define __NR_reboot 1096
-#define __NR_truncate 1097
-#define __NR_ftruncate 1098
-#define __NR_fchmod 1099
-#define __NR_fchown 1100
-#define __NR_getpriority 1101
-#define __NR_setpriority 1102
-#define __NR_statfs 1103
-#define __NR_fstatfs 1104
-#define __NR_gettid 1105
-#define __NR_semget 1106
-#define __NR_semop 1107
-#define __NR_semctl 1108
-#define __NR_msgget 1109
-#define __NR_msgsnd 1110
-#define __NR_msgrcv 1111
-#define __NR_msgctl 1112
-#define __NR_shmget 1113
-#define __NR_shmat 1114
-#define __NR_shmdt 1115
-#define __NR_shmctl 1116
-/* also known as klogctl() in GNU libc: */
-#define __NR_syslog 1117
-#define __NR_setitimer 1118
-#define __NR_getitimer 1119
-/* 1120 was __NR_old_stat */
-/* 1121 was __NR_old_lstat */
-/* 1122 was __NR_old_fstat */
-#define __NR_vhangup 1123
-#define __NR_lchown 1124
-#define __NR_remap_file_pages 1125
-#define __NR_wait4 1126
-#define __NR_sysinfo 1127
-#define __NR_clone 1128
-#define __NR_setdomainname 1129
-#define __NR_uname 1130
-#define __NR_adjtimex 1131
-/* 1132 was __NR_create_module */
-#define __NR_init_module 1133
-#define __NR_delete_module 1134
-/* 1135 was __NR_get_kernel_syms */
-/* 1136 was __NR_query_module */
-#define __NR_quotactl 1137
-#define __NR_bdflush 1138
-#define __NR_sysfs 1139
-#define __NR_personality 1140
-#define __NR_afs_syscall 1141
-#define __NR_setfsuid 1142
-#define __NR_setfsgid 1143
-#define __NR_getdents 1144
-#define __NR_flock 1145
-#define __NR_readv 1146
-#define __NR_writev 1147
-#define __NR_pread64 1148
-#define __NR_pwrite64 1149
-#define __NR__sysctl 1150
-#define __NR_mmap 1151
-#define __NR_munmap 1152
-#define __NR_mlock 1153
-#define __NR_mlockall 1154
-#define __NR_mprotect 1155
-#define __NR_mremap 1156
-#define __NR_msync 1157
-#define __NR_munlock 1158
-#define __NR_munlockall 1159
-#define __NR_sched_getparam 1160
-#define __NR_sched_setparam 1161
-#define __NR_sched_getscheduler 1162
-#define __NR_sched_setscheduler 1163
-#define __NR_sched_yield 1164
-#define __NR_sched_get_priority_max 1165
-#define __NR_sched_get_priority_min 1166
-#define __NR_sched_rr_get_interval 1167
-#define __NR_nanosleep 1168
-#define __NR_nfsservctl 1169
-#define __NR_prctl 1170
-/* 1171 is reserved for backwards compatibility with old __NR_getpagesize */
-#define __NR_mmap2 1172
-#define __NR_pciconfig_read 1173
-#define __NR_pciconfig_write 1174
-#define __NR_perfmonctl 1175
-#define __NR_sigaltstack 1176
-#define __NR_rt_sigaction 1177
-#define __NR_rt_sigpending 1178
-#define __NR_rt_sigprocmask 1179
-#define __NR_rt_sigqueueinfo 1180
-#define __NR_rt_sigreturn 1181
-#define __NR_rt_sigsuspend 1182
-#define __NR_rt_sigtimedwait 1183
-#define __NR_getcwd 1184
-#define __NR_capget 1185
-#define __NR_capset 1186
-#define __NR_sendfile 1187
-#define __NR_getpmsg 1188
-#define __NR_putpmsg 1189
-#define __NR_socket 1190
-#define __NR_bind 1191
-#define __NR_connect 1192
-#define __NR_listen 1193
-#define __NR_accept 1194
-#define __NR_getsockname 1195
-#define __NR_getpeername 1196
-#define __NR_socketpair 1197
-#define __NR_send 1198
-#define __NR_sendto 1199
-#define __NR_recv 1200
-#define __NR_recvfrom 1201
-#define __NR_shutdown 1202
-#define __NR_setsockopt 1203
-#define __NR_getsockopt 1204
-#define __NR_sendmsg 1205
-#define __NR_recvmsg 1206
-#define __NR_pivot_root 1207
-#define __NR_mincore 1208
-#define __NR_madvise 1209
-#define __NR_stat 1210
-#define __NR_lstat 1211
-#define __NR_fstat 1212
-#define __NR_clone2 1213
-#define __NR_getdents64 1214
-#define __NR_getunwind 1215
-#define __NR_readahead 1216
-#define __NR_setxattr 1217
-#define __NR_lsetxattr 1218
-#define __NR_fsetxattr 1219
-#define __NR_getxattr 1220
-#define __NR_lgetxattr 1221
-#define __NR_fgetxattr 1222
-#define __NR_listxattr 1223
-#define __NR_llistxattr 1224
-#define __NR_flistxattr 1225
-#define __NR_removexattr 1226
-#define __NR_lremovexattr 1227
-#define __NR_fremovexattr 1228
-#define __NR_tkill 1229
-#define __NR_futex 1230
-#define __NR_sched_setaffinity 1231
-#define __NR_sched_getaffinity 1232
-#define __NR_set_tid_address 1233
-#define __NR_fadvise64 1234
-#define __NR_tgkill 1235
-#define __NR_exit_group 1236
-#define __NR_lookup_dcookie 1237
-#define __NR_io_setup 1238
-#define __NR_io_destroy 1239
-#define __NR_io_getevents 1240
-#define __NR_io_submit 1241
-#define __NR_io_cancel 1242
-#define __NR_epoll_create 1243
-#define __NR_epoll_ctl 1244
-#define __NR_epoll_wait 1245
-#define __NR_restart_syscall 1246
-#define __NR_semtimedop 1247
-#define __NR_timer_create 1248
-#define __NR_timer_settime 1249
-#define __NR_timer_gettime 1250
-#define __NR_timer_getoverrun 1251
-#define __NR_timer_delete 1252
-#define __NR_clock_settime 1253
-#define __NR_clock_gettime 1254
-#define __NR_clock_getres 1255
-#define __NR_clock_nanosleep 1256
-#define __NR_fstatfs64 1257
-#define __NR_statfs64 1258
-#define __NR_mbind 1259
-#define __NR_get_mempolicy 1260
-#define __NR_set_mempolicy 1261
-#define __NR_mq_open 1262
-#define __NR_mq_unlink 1263
-#define __NR_mq_timedsend 1264
-#define __NR_mq_timedreceive 1265
-#define __NR_mq_notify 1266
-#define __NR_mq_getsetattr 1267
-#define __NR_kexec_load 1268
-#define __NR_vserver 1269
-#define __NR_waitid 1270
-#define __NR_add_key 1271
-#define __NR_request_key 1272
-#define __NR_keyctl 1273
-#define __NR_ioprio_set 1274
-#define __NR_ioprio_get 1275
-#define __NR_set_zone_reclaim 1276
-#define __NR_inotify_init 1277
-#define __NR_inotify_add_watch 1278
-#define __NR_inotify_rm_watch 1279
-
-#ifdef __KERNEL__
-
-#include <linux/config.h>
-
-#define NR_syscalls 256 /* length of syscall table */
-
-#define __ARCH_WANT_SYS_RT_SIGACTION
-
-#ifdef CONFIG_IA32_SUPPORT
-# define __ARCH_WANT_SYS_FADVISE64
-# define __ARCH_WANT_SYS_GETPGRP
-# define __ARCH_WANT_SYS_LLSEEK
-# define __ARCH_WANT_SYS_NICE
-# define __ARCH_WANT_SYS_OLD_GETRLIMIT
-# define __ARCH_WANT_SYS_OLDUMOUNT
-# define __ARCH_WANT_SYS_SIGPENDING
-# define __ARCH_WANT_SYS_SIGPROCMASK
-# define __ARCH_WANT_COMPAT_SYS_TIME
-#endif
-
-#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
-
-#include <linux/types.h>
-#include <linux/linkage.h>
-#include <linux/compiler.h>
-
-extern long __ia64_syscall (long a0, long a1, long a2, long a3, long a4, long nr);
-
-#ifdef __KERNEL_SYSCALLS__
-
-#include <linux/compiler.h>
-#include <linux/string.h>
-#include <linux/signal.h>
-#include <asm/ptrace.h>
-#include <linux/stringify.h>
-#include <linux/syscalls.h>
-
-static inline long
-open (const char * name, int mode, int flags)
-{
- return sys_open(name, mode, flags);
-}
-
-static inline long
-dup (int fd)
-{
- return sys_dup(fd);
-}
-
-static inline long
-close (int fd)
-{
- return sys_close(fd);
-}
-
-static inline off_t
-lseek (int fd, off_t off, int whence)
-{
- return sys_lseek(fd, off, whence);
-}
-
-static inline void
-_exit (int value)
-{
- sys_exit(value);
-}
-
-#define exit(x) _exit(x)
-
-static inline long
-write (int fd, const char * buf, size_t nr)
-{
- return sys_write(fd, buf, nr);
-}
-
-static inline long
-read (int fd, char * buf, size_t nr)
-{
- return sys_read(fd, buf, nr);
-}
-
-
-static inline long
-setsid (void)
-{
- return sys_setsid();
-}
-
-static inline pid_t
-waitpid (int pid, int * wait_stat, int flags)
-{
- return sys_wait4(pid, wait_stat, flags, NULL);
-}
-
-
-extern int execve (const char *filename, char *const av[], char *const ep[]);
-extern pid_t clone (unsigned long flags, void *sp);
-
-#endif /* __KERNEL_SYSCALLS__ */
-
-asmlinkage unsigned long sys_mmap(
- unsigned long addr, unsigned long len,
- int prot, int flags,
- int fd, long off);
-asmlinkage unsigned long sys_mmap2(
- unsigned long addr, unsigned long len,
- int prot, int flags,
- int fd, long pgoff);
-struct pt_regs;
-struct sigaction;
-long sys_execve(char __user *filename, char __user * __user *argv,
- char __user * __user *envp, struct pt_regs *regs);
-asmlinkage long sys_pipe(void);
-asmlinkage long sys_ptrace(long request, pid_t pid,
- unsigned long addr, unsigned long data);
-asmlinkage long sys_rt_sigaction(int sig,
- const struct sigaction __user *act,
- struct sigaction __user *oact,
- size_t sigsetsize);
-
-/*
- * "Conditional" syscalls
- *
- * Note, this macro can only be used in the file which defines sys_ni_syscall, i.e., in
- * kernel/sys_ni.c. This version causes warnings because the declaration isn't a
- * proper prototype, but we can't use __typeof__ either, because not all cond_syscall()
- * declarations have prototypes at the moment.
- */
-#define cond_syscall(x) asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
-
-#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-#endif /* _ASM_IA64_UNISTD_H */
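
The weak-alias trick behind cond_syscall is worth seeing in isolation. Below is a minimal user-space sketch of the same pattern; the file and function names are hypothetical, and GCC or Clang on an ELF target is assumed:

    /* weak_alias_demo.c - minimal sketch of the weak-alias pattern used by
     * cond_syscall above.  All names here are hypothetical.
     * Build: cc weak_alias_demo.c -o weak_alias_demo
     */
    #include <stdio.h>

    /* The fallback implementation, standing in for sys_ni_syscall. */
    long default_impl(void)
    {
        return -1;    /* the kernel would return -ENOSYS */
    }

    /* Weak alias: unless some other object file provides a strong
     * definition of maybe_syscall, calls resolve to default_impl.
     */
    long maybe_syscall(void) __attribute__((weak, alias("default_impl")));

    int main(void)
    {
        printf("maybe_syscall() = %ld\n", maybe_syscall());
        return 0;
    }

If another object file later supplies a strong definition of maybe_syscall(), the linker silently prefers it, which is how an optional syscall displaces the sys_ni_syscall stub.
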
diff --git a/xen/include/asm-ia64/linux/asm/unwind.h b/xen/include/asm-ia64/linux/asm/unwind.h
deleted file mode 100644
index 61426ad3ec..0000000000
--- a/xen/include/asm-ia64/linux/asm/unwind.h
+++ /dev/null
@@ -1,240 +0,0 @@
-#ifndef _ASM_IA64_UNWIND_H
-#define _ASM_IA64_UNWIND_H
-
-/*
- * Copyright (C) 1999-2000, 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * A simple API for unwinding kernel stacks. This is used for
- * debugging and error reporting purposes. The kernel doesn't need
- * full-blown stack unwinding with all the bells and whistles, so there
- * is not much point in implementing the full IA-64 unwind API (though
- * it would of course be possible to implement the kernel API on top
- * of it).
- */
-
-struct task_struct; /* forward declaration */
-struct switch_stack; /* forward declaration */
-
-enum unw_application_register {
- UNW_AR_BSP,
- UNW_AR_BSPSTORE,
- UNW_AR_PFS,
- UNW_AR_RNAT,
- UNW_AR_UNAT,
- UNW_AR_LC,
- UNW_AR_EC,
- UNW_AR_FPSR,
- UNW_AR_RSC,
- UNW_AR_CCV,
- UNW_AR_CSD,
- UNW_AR_SSD
-};
-
-/*
- * The following declarations are private to the unwind
- * implementation:
- */
-
-struct unw_stack {
- unsigned long limit;
- unsigned long top;
-};
-
-#define UNW_FLAG_INTERRUPT_FRAME (1UL << 0)
-
-/*
- * No user of this module should ever access this structure directly
- * as it is subject to change. It is declared here solely so we can
- * use automatic variables.
- */
-struct unw_frame_info {
- struct unw_stack regstk;
- struct unw_stack memstk;
- unsigned int flags;
- short hint;
- short prev_script;
-
- /* current frame info: */
- unsigned long bsp; /* backing store pointer value */
- unsigned long sp; /* stack pointer value */
- unsigned long psp; /* previous sp value */
- unsigned long ip; /* instruction pointer value */
- unsigned long pr; /* current predicate values */
- unsigned long *cfm_loc; /* cfm save location (or NULL) */
- unsigned long pt; /* struct pt_regs location */
-
- struct task_struct *task;
- struct switch_stack *sw;
-
- /* preserved state: */
- unsigned long *bsp_loc; /* previous bsp save location */
- unsigned long *bspstore_loc;
- unsigned long *pfs_loc;
- unsigned long *rnat_loc;
- unsigned long *rp_loc;
- unsigned long *pri_unat_loc;
- unsigned long *unat_loc;
- unsigned long *pr_loc;
- unsigned long *lc_loc;
- unsigned long *fpsr_loc;
- struct unw_ireg {
- unsigned long *loc;
- struct unw_ireg_nat {
- long type : 3; /* enum unw_nat_type */
- signed long off : 61; /* NaT word is at loc+nat.off */
- } nat;
- } r4, r5, r6, r7;
- unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc;
- struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16];
-};
-
-/*
- * The official API follows below:
- */
-
-struct unw_table_entry {
- u64 start_offset;
- u64 end_offset;
- u64 info_offset;
-};
-
-/*
- * Initialize unwind support.
- */
-extern void unw_init (void);
-
-extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
- const void *table_start, const void *table_end);
-
-extern void unw_remove_unwind_table (void *handle);
-
-/*
- * Prepare to unwind blocked task t.
- */
-extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
-
-/*
- * Prepare to unwind from interruption. The pt-regs and switch-stack structures must
- * be "adjacent" (no state modifications between pt-regs and switch-stack).
- */
-extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
- struct pt_regs *pt, struct switch_stack *sw);
-
-extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
- struct switch_stack *sw);
-
-/*
- * Prepare to unwind the currently running thread.
- */
-extern void unw_init_running (void (*callback)(struct unw_frame_info *info, void *arg), void *arg);
-
-/*
- * Unwind to the previous frame. Returns 0 if successful, negative
- * number in case of an error.
- */
-extern int unw_unwind (struct unw_frame_info *info);
-
-/*
- * Unwind until the return pointer is in user-land (or until an error
- * occurs). Returns 0 if successful, negative number in case of
- * error.
- */
-extern int unw_unwind_to_user (struct unw_frame_info *info);
-
-#define unw_is_intr_frame(info) (((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0)
-
-static inline int
-unw_get_ip (struct unw_frame_info *info, unsigned long *valp)
-{
- *valp = (info)->ip;
- return 0;
-}
-
-static inline int
-unw_get_sp (struct unw_frame_info *info, unsigned long *valp)
-{
- *valp = (info)->sp;
- return 0;
-}
-
-static inline int
-unw_get_psp (struct unw_frame_info *info, unsigned long *valp)
-{
- *valp = (info)->psp;
- return 0;
-}
-
-static inline int
-unw_get_bsp (struct unw_frame_info *info, unsigned long *valp)
-{
- *valp = (info)->bsp;
- return 0;
-}
-
-static inline int
-unw_get_cfm (struct unw_frame_info *info, unsigned long *valp)
-{
- *valp = *(info)->cfm_loc;
- return 0;
-}
-
-static inline int
-unw_set_cfm (struct unw_frame_info *info, unsigned long val)
-{
- *(info)->cfm_loc = val;
- return 0;
-}
-
-static inline int
-unw_get_rp (struct unw_frame_info *info, unsigned long *val)
-{
- if (!info->rp_loc)
- return -1;
- *val = *info->rp_loc;
- return 0;
-}
-
-extern int unw_access_gr (struct unw_frame_info *, int, unsigned long *, char *, int);
-extern int unw_access_br (struct unw_frame_info *, int, unsigned long *, int);
-extern int unw_access_fr (struct unw_frame_info *, int, struct ia64_fpreg *, int);
-extern int unw_access_ar (struct unw_frame_info *, int, unsigned long *, int);
-extern int unw_access_pr (struct unw_frame_info *, unsigned long *, int);
-
-static inline int
-unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat)
-{
- return unw_access_gr(i, n, &v, &nat, 1);
-}
-
-static inline int
-unw_set_br (struct unw_frame_info *i, int n, unsigned long v)
-{
- return unw_access_br(i, n, &v, 1);
-}
-
-static inline int
-unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v)
-{
- return unw_access_fr(i, n, &v, 1);
-}
-
-static inline int
-unw_set_ar (struct unw_frame_info *i, int n, unsigned long v)
-{
- return unw_access_ar(i, n, &v, 1);
-}
-
-static inline int
-unw_set_pr (struct unw_frame_info *i, unsigned long v)
-{
- return unw_access_pr(i, &v, 1);
-}
-
-#define unw_get_gr(i,n,v,nat) unw_access_gr(i,n,v,nat,0)
-#define unw_get_br(i,n,v) unw_access_br(i,n,v,0)
-#define unw_get_fr(i,n,v) unw_access_fr(i,n,v,0)
-#define unw_get_ar(i,n,v) unw_access_ar(i,n,v,0)
-#define unw_get_pr(i,v) unw_access_pr(i,v,0)
-
-#endif /* _ASM_IA64_UNWIND_H */
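
The header gives the iteration protocol but no caller. The following user-space mock sketches how the cursor API composes (initialise, read the ip, step outward until unwinding fails); the mock_* names are stand-ins, not the real kernel types:

    /* unwind_walk_demo.c - user-space mock of the iteration protocol above:
     * initialise a cursor, read the ip, step outward until unwinding fails.
     * The mock_* types are stand-ins for the real kernel structures.
     */
    #include <stdio.h>

    struct mock_frame_info {
        const unsigned long *ips;    /* fake return addresses, 0-terminated */
        int idx;
    };

    static int mock_get_ip(struct mock_frame_info *info, unsigned long *valp)
    {
        *valp = info->ips[info->idx];
        return 0;
    }

    /* 0 on success, negative on error, like unw_unwind(). */
    static int mock_unwind(struct mock_frame_info *info)
    {
        if (info->ips[info->idx + 1] == 0)
            return -1;    /* no older frame */
        info->idx++;
        return 0;
    }

    int main(void)
    {
        static const unsigned long ips[] = {
            0xa000000100001000UL, 0xa000000100002000UL,
            0xa000000100003000UL, 0
        };
        struct mock_frame_info info = { ips, 0 };
        unsigned long ip;

        do {
            mock_get_ip(&info, &ip);
            printf(" [<%016lx>]\n", ip);
        } while (mock_unwind(&info) == 0);
        return 0;
    }

In the kernel the same do/while shape pairs unw_init_from_blocked_task() with unw_get_ip() and unw_unwind().
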
diff --git a/xen/include/asm-ia64/linux/bcd.h b/xen/include/asm-ia64/linux/bcd.h
deleted file mode 100644
index c545308125..0000000000
--- a/xen/include/asm-ia64/linux/bcd.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Permission is hereby granted to copy, modify and redistribute this code
- * in terms of the GNU Library General Public License, Version 2 or later,
- * at your option.
- */
-
-/* macros to translate to/from binary and binary-coded decimal (frequently
- * found in RTC chips).
- */
-
-#ifndef _BCD_H
-#define _BCD_H
-
-#define BCD2BIN(val) (((val) & 0x0f) + ((val)>>4)*10)
-#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10)
-
-/* backwards compat */
-#define BCD_TO_BIN(val) ((val)=BCD2BIN(val))
-#define BIN_TO_BCD(val) ((val)=BIN2BCD(val))
-
-#endif /* _BCD_H */
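
These macros are easy to sanity-check; a minimal stand-alone sketch with a hypothetical file name and an RTC-style sample value:

    /* bcd_demo.c - exercise BCD2BIN/BIN2BCD on an RTC-style sample value:
     * 0x59 in BCD is decimal 59.
     */
    #include <assert.h>
    #include <stdio.h>

    #define BCD2BIN(val) (((val) & 0x0f) + ((val) >> 4) * 10)
    #define BIN2BCD(val) ((((val) / 10) << 4) + (val) % 10)

    int main(void)
    {
        unsigned int bcd = 0x59;

        assert(BCD2BIN(bcd) == 59);      /* 0x59 -> 59 */
        assert(BIN2BCD(59) == 0x59);     /* ...and back */
        printf("BCD 0x%02x == decimal %u\n", bcd, BCD2BIN(bcd));
        return 0;
    }
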
diff --git a/xen/include/asm-ia64/linux/bitmap.h b/xen/include/asm-ia64/linux/bitmap.h
deleted file mode 100644
index 86dd5502b0..0000000000
--- a/xen/include/asm-ia64/linux/bitmap.h
+++ /dev/null
@@ -1,261 +0,0 @@
-#ifndef __LINUX_BITMAP_H
-#define __LINUX_BITMAP_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/string.h>
-
-/*
- * bitmaps provide bit arrays that consume one or more unsigned
- * longs. The bitmap interface and available operations are listed
- * here, in bitmap.h
- *
- * Function implementations generic to all architectures are in
- * lib/bitmap.c. Function implementations that are architecture
- * specific are in various include/asm-<arch>/bitops.h headers
- * and other arch/<arch> specific files.
- *
- * See lib/bitmap.c for more details.
- */
-
-/*
- * The available bitmap operations and their rough meaning in the
- * case that the bitmap is a single unsigned long are thus:
- *
- * bitmap_zero(dst, nbits) *dst = 0UL
- * bitmap_fill(dst, nbits) *dst = ~0UL
- * bitmap_copy(dst, src, nbits) *dst = *src
- * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
- * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
- * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
- * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
- * bitmap_complement(dst, src, nbits) *dst = ~(*src)
- * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
- * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
- * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
- * bitmap_empty(src, nbits) Are all bits zero in *src?
- * bitmap_full(src, nbits) Are all bits set in *src?
- * bitmap_weight(src, nbits) Hamming Weight: number of set bits
- * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
- * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
- * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
- * bitmap_parse(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
- * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
- * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from list
- */
-
-/*
- * Also the following operations in asm/bitops.h apply to bitmaps.
- *
- * set_bit(bit, addr) *addr |= bit
- * clear_bit(bit, addr) *addr &= ~bit
- * change_bit(bit, addr) *addr ^= bit
- * test_bit(bit, addr) Is bit set in *addr?
- * test_and_set_bit(bit, addr) Set bit and return old value
- * test_and_clear_bit(bit, addr) Clear bit and return old value
- * test_and_change_bit(bit, addr) Change bit and return old value
- * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
- * find_first_bit(addr, nbits) Position first set bit in *addr
- * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
- * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
- */
-
-/*
- * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
- * to declare an array named 'name' of just enough unsigned longs to
- * contain all bit positions from 0 to 'bits' - 1.
- */
-
-/*
- * lib/bitmap.c provides these functions:
- */
-
-extern int __bitmap_empty(const unsigned long *bitmap, int bits);
-extern int __bitmap_full(const unsigned long *bitmap, int bits);
-extern int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits);
-extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
- int bits);
-extern void __bitmap_shift_right(unsigned long *dst,
- const unsigned long *src, int shift, int bits);
-extern void __bitmap_shift_left(unsigned long *dst,
- const unsigned long *src, int shift, int bits);
-extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits);
-extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits);
-extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits);
-extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits);
-extern int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits);
-extern int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits);
-extern int __bitmap_weight(const unsigned long *bitmap, int bits);
-
-extern int bitmap_scnprintf(char *buf, unsigned int len,
- const unsigned long *src, int nbits);
-extern int bitmap_parse(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
-extern int bitmap_scnlistprintf(char *buf, unsigned int len,
- const unsigned long *src, int nbits);
-extern int bitmap_parselist(const char *buf, unsigned long *maskp,
- int nmaskbits);
-extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
-extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
-extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
-
-#define BITMAP_LAST_WORD_MASK(nbits) \
-( \
- ((nbits) % BITS_PER_LONG) ? \
- (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
-)
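
(Worked example: with BITS_PER_LONG == 64 and nbits == 70, 70 % 64 == 6, so the mask is (1UL << 6) - 1 == 0x3f, covering just the six live bits of the partial last word; when nbits is an exact multiple of the word size the modulo is zero and the mask falls back to ~0UL.)
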
-
-static inline void bitmap_zero(unsigned long *dst, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = 0UL;
- else {
- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0, len);
- }
-}
-
-static inline void bitmap_fill(unsigned long *dst, int nbits)
-{
- size_t nlongs = BITS_TO_LONGS(nbits);
- if (nlongs > 1) {
- int len = (nlongs - 1) * sizeof(unsigned long);
- memset(dst, 0xff, len);
- }
- dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
-}
-
-static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
- int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = *src;
- else {
- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memcpy(dst, src, len);
- }
-}
-
-static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = *src1 & *src2;
- else
- __bitmap_and(dst, src1, src2, nbits);
-}
-
-static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = *src1 | *src2;
- else
- __bitmap_or(dst, src1, src2, nbits);
-}
-
-static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = *src1 ^ *src2;
- else
- __bitmap_xor(dst, src1, src2, nbits);
-}
-
-static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = *src1 & ~(*src2);
- else
- __bitmap_andnot(dst, src1, src2, nbits);
-}
-
-static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
- int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
- else
- __bitmap_complement(dst, src, nbits);
-}
-
-static inline int bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_equal(src1, src2, nbits);
-}
-
-static inline int bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
- else
- return __bitmap_intersects(src1, src2, nbits);
-}
-
-static inline int bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_subset(src1, src2, nbits);
-}
-
-static inline int bitmap_empty(const unsigned long *src, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_empty(src, nbits);
-}
-
-static inline int bitmap_full(const unsigned long *src, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_full(src, nbits);
-}
-
-static inline int bitmap_weight(const unsigned long *src, int nbits)
-{
- return __bitmap_weight(src, nbits);
-}
-
-static inline void bitmap_shift_right(unsigned long *dst,
- const unsigned long *src, int n, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = *src >> n;
- else
- __bitmap_shift_right(dst, src, n, nbits);
-}
-
-static inline void bitmap_shift_left(unsigned long *dst,
- const unsigned long *src, int n, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
- else
- __bitmap_shift_left(dst, src, n, nbits);
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __LINUX_BITMAP_H */
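
The nbits <= BITS_PER_LONG fast path shared by the inlines above can be exercised stand-alone; a sketch assuming a 64-bit unsigned long, with hypothetical names:

    /* bitmap_demo.c - the single-word fast path of bitmap_and() and
     * bitmap_subset() above, lifted into user space.
     */
    #include <stdio.h>

    static void bitmap_and_small(unsigned long *dst, const unsigned long *src1,
                                 const unsigned long *src2)
    {
        *dst = *src1 & *src2;    /* single-word case: one AND, no loop */
    }

    static int bitmap_subset_small(const unsigned long *src1,
                                   const unsigned long *src2)
    {
        return !(*src1 & ~(*src2));    /* every bit of src1 also in src2 */
    }

    int main(void)
    {
        unsigned long a = 0xf0f0UL, b = 0x00f0UL, dst;

        bitmap_and_small(&dst, &a, &b);
        printf("and = %#lx, b subset of a = %d\n",
               dst, bitmap_subset_small(&b, &a));    /* 0xf0, 1 */
        return 0;
    }
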
diff --git a/xen/include/asm-ia64/linux/bitops.h b/xen/include/asm-ia64/linux/bitops.h
deleted file mode 100644
index cb3c3ef50f..0000000000
--- a/xen/include/asm-ia64/linux/bitops.h
+++ /dev/null
@@ -1,159 +0,0 @@
-#ifndef _LINUX_BITOPS_H
-#define _LINUX_BITOPS_H
-#include <asm/types.h>
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-static inline int generic_ffs(int x)
-{
- int r = 1;
-
- if (!x)
- return 0;
- if (!(x & 0xffff)) {
- x >>= 16;
- r += 16;
- }
- if (!(x & 0xff)) {
- x >>= 8;
- r += 8;
- }
- if (!(x & 0xf)) {
- x >>= 4;
- r += 4;
- }
- if (!(x & 3)) {
- x >>= 2;
- r += 2;
- }
- if (!(x & 1)) {
- x >>= 1;
- r += 1;
- }
- return r;
-}
-
-/*
- * fls: find last bit set.
- */
-
-static __inline__ int generic_fls(int x)
-{
- int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-
-/*
- * Include this here because some architectures need generic_ffs/fls in
- * scope
- */
-#include <asm/bitops.h>
-
-static __inline__ int get_bitmask_order(unsigned int count)
-{
- int order;
-
- order = fls(count);
- return order; /* We could be slightly more clever with -1 here... */
-}
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-static inline unsigned int generic_hweight32(unsigned int w)
-{
- unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
- res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
- res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
- res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
- return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
-}
-
-static inline unsigned int generic_hweight16(unsigned int w)
-{
- unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555);
- res = (res & 0x3333) + ((res >> 2) & 0x3333);
- res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F);
- return (res & 0x00FF) + ((res >> 8) & 0x00FF);
-}
-
-static inline unsigned int generic_hweight8(unsigned int w)
-{
- unsigned int res = (w & 0x55) + ((w >> 1) & 0x55);
- res = (res & 0x33) + ((res >> 2) & 0x33);
- return (res & 0x0F) + ((res >> 4) & 0x0F);
-}
-
-static inline unsigned long generic_hweight64(__u64 w)
-{
-#if BITS_PER_LONG < 64
- return generic_hweight32((unsigned int)(w >> 32)) +
- generic_hweight32((unsigned int)w);
-#else
- u64 res;
- res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul);
- res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
- res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful);
- res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul);
- res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul);
- return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul);
-#endif
-}
-
-static inline unsigned long hweight_long(unsigned long w)
-{
- return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w);
-}
-
-/*
- * rol32 - rotate a 32-bit value left
- *
- * @word: value to rotate
- * @shift: bits to roll
- */
-static inline __u32 rol32(__u32 word, unsigned int shift)
-{
- return (word << shift) | (word >> (32 - shift));
-}
-
-/*
- * ror32 - rotate a 32-bit value right
- *
- * @word: value to rotate
- * @shift: bits to roll
- */
-static inline __u32 ror32(__u32 word, unsigned int shift)
-{
- return (word >> shift) | (word << (32 - shift));
-}
-
-#endif
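
A quick stand-alone check that the two rotations above are inverses (hypothetical file name; shift must stay in 1..31 to avoid undefined behaviour at the edges):

    /* rotate_demo.c - check that ror32() undoes rol32() for a sample value.
     * Self-contained user-space copy of the helpers above.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rol32(uint32_t word, unsigned int shift)
    {
        return (word << shift) | (word >> (32 - shift));
    }

    static uint32_t ror32(uint32_t word, unsigned int shift)
    {
        return (word >> shift) | (word << (32 - shift));
    }

    int main(void)
    {
        uint32_t v = 0x12345678;

        assert(ror32(rol32(v, 8), 8) == v);    /* rotations are inverses */
        printf("rol32(%#x, 8) = %#x\n", v, rol32(v, 8));    /* 0x34567812 */
        return 0;
    }
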
diff --git a/xen/include/asm-ia64/linux/byteorder/README.origin b/xen/include/asm-ia64/linux/byteorder/README.origin
deleted file mode 100644
index ba92292a39..0000000000
--- a/xen/include/asm-ia64/linux/byteorder/README.origin
+++ /dev/null
@@ -1,9 +0,0 @@
-# Source files in this directory are identical copies of linux-2.6.13 files:
-#
-# NOTE: DO NOT commit changes to these files! If a file
-# needs to be changed, move it to ../linux-xen and follow
-# the instructions in the README there.
-
-generic.h -> linux/include/linux/byteorder/generic.h
-little_endian.h -> linux/include/linux/byteorder/little_endian.h
-swab.h -> linux/include/linux/byteorder/swab.h
diff --git a/xen/include/asm-ia64/linux/byteorder/generic.h b/xen/include/asm-ia64/linux/byteorder/generic.h
deleted file mode 100644
index 5fde6f4d6c..0000000000
--- a/xen/include/asm-ia64/linux/byteorder/generic.h
+++ /dev/null
@@ -1,172 +0,0 @@
-#ifndef _LINUX_BYTEORDER_GENERIC_H
-#define _LINUX_BYTEORDER_GENERIC_H
-
-/*
- * linux/byteorder_generic.h
- * Generic Byte-reordering support
- *
- * Francois-Rene Rideau <fare@tunes.org> 19970707
- * gathered all the good ideas from all asm-foo/byteorder.h into one file,
- * cleaned them up.
- * I hope it is compliant with non-GCC compilers.
- * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
- * because I wasn't sure it would be ok to put it in types.h
- * Upgraded it to 2.1.43
- * Francois-Rene Rideau <fare@tunes.org> 19971012
- * Upgraded it to 2.1.57
- * to please Linus T., replaced huge #ifdef's between little/big endian
- * by nestedly #include'd files.
- * Francois-Rene Rideau <fare@tunes.org> 19971205
- * Made it to 2.1.71; now a facelift:
- * Put files under include/linux/byteorder/
- * Split swab from generic support.
- *
- * TODO:
- * = Regular kernel maintainers could also replace all these manual
- * byteswap macros that remain, disseminated among drivers,
- * after some grep of the sources...
- * = Linus might want to rename all these macros and files to fit his taste,
- * to fit his personal naming scheme.
- * = it seems that a few drivers would also appreciate
- * nybble swapping support...
- * = every architecture could add their byteswap macro in asm/byteorder.h
- * see how some architectures already do (i386, alpha, ppc, etc)
- * = cpu_to_beXX and beXX_to_cpu might some day need to be well
- * distinguished throughout the kernel. This is not the case currently,
- * since little endian, big endian, and pdp endian machines don't need it.
- * But this might be the case for, say, a port of Linux to 20/21 bit
- * architectures (any F21 Linux addict around?).
- */
-
-/*
- * The following macros are to be defined by <asm/byteorder.h>:
- *
- * Conversion of long and short int between network and host format
- * ntohl(__u32 x)
- * ntohs(__u16 x)
- * htonl(__u32 x)
- * htons(__u16 x)
- * It seems that some programs (which? where? or perhaps a standard? POSIX?)
- * might like the above to be functions, not macros (why?).
- * If that's true, then detect them, and take measures.
- * Anyway, the measure is: define only ___ntohl as a macro instead,
- * and in a separate file, have
- * unsigned long inline ntohl(x){return ___ntohl(x);}
- *
- * The same for constant arguments
- * __constant_ntohl(__u32 x)
- * __constant_ntohs(__u16 x)
- * __constant_htonl(__u32 x)
- * __constant_htons(__u16 x)
- *
- * Conversion of XX-bit integers (16-, 32-, or 64-bit)
- * between native CPU format and little/big endian format
- * 64-bit stuff only defined for proper architectures
- * cpu_to_[bl]eXX(__uXX x)
- * [bl]eXX_to_cpu(__uXX x)
- *
- * The same, but takes a pointer to the value to convert
- * cpu_to_[bl]eXXp(__uXX x)
- * [bl]eXX_to_cpup(__uXX x)
- *
- * The same, but change in situ
- * cpu_to_[bl]eXXs(__uXX x)
- * [bl]eXX_to_cpus(__uXX x)
- *
- * See asm-foo/byteorder.h for examples of how to provide
- * architecture-optimized versions
- *
- */
-
-
-#if defined(__KERNEL__)
-/*
- * inside the kernel, we can use nicknames;
- * outside of it, we must avoid POSIX namespace pollution...
- */
-#define cpu_to_le64 __cpu_to_le64
-#define le64_to_cpu __le64_to_cpu
-#define cpu_to_le32 __cpu_to_le32
-#define le32_to_cpu __le32_to_cpu
-#define cpu_to_le16 __cpu_to_le16
-#define le16_to_cpu __le16_to_cpu
-#define cpu_to_be64 __cpu_to_be64
-#define be64_to_cpu __be64_to_cpu
-#define cpu_to_be32 __cpu_to_be32
-#define be32_to_cpu __be32_to_cpu
-#define cpu_to_be16 __cpu_to_be16
-#define be16_to_cpu __be16_to_cpu
-#define cpu_to_le64p __cpu_to_le64p
-#define le64_to_cpup __le64_to_cpup
-#define cpu_to_le32p __cpu_to_le32p
-#define le32_to_cpup __le32_to_cpup
-#define cpu_to_le16p __cpu_to_le16p
-#define le16_to_cpup __le16_to_cpup
-#define cpu_to_be64p __cpu_to_be64p
-#define be64_to_cpup __be64_to_cpup
-#define cpu_to_be32p __cpu_to_be32p
-#define be32_to_cpup __be32_to_cpup
-#define cpu_to_be16p __cpu_to_be16p
-#define be16_to_cpup __be16_to_cpup
-#define cpu_to_le64s __cpu_to_le64s
-#define le64_to_cpus __le64_to_cpus
-#define cpu_to_le32s __cpu_to_le32s
-#define le32_to_cpus __le32_to_cpus
-#define cpu_to_le16s __cpu_to_le16s
-#define le16_to_cpus __le16_to_cpus
-#define cpu_to_be64s __cpu_to_be64s
-#define be64_to_cpus __be64_to_cpus
-#define cpu_to_be32s __cpu_to_be32s
-#define be32_to_cpus __be32_to_cpus
-#define cpu_to_be16s __cpu_to_be16s
-#define be16_to_cpus __be16_to_cpus
-#endif
-
-
-#if defined(__KERNEL__)
-/*
- * Handle ntohl and such. These have various compatibility
- * issues - like we want to give the prototype even though we
- * also have a macro for them in case some strange program
- * wants to take the address of the thing or something..
- *
- * Note that these used to return a "long" in libc5, even though
- * long is often 64-bit these days.. Thus the casts.
- *
- * They have to be macros in order to do the constant folding
- * correctly - if the argument is passed into an inline function
- * it is no longer constant according to gcc..
- */
-
-#undef ntohl
-#undef ntohs
-#undef htonl
-#undef htons
-
-/*
- * Do the prototypes. Somebody might want to take the
- * address or some such sick thing..
- */
-extern __u32 ntohl(__be32);
-extern __be32 htonl(__u32);
-extern __u16 ntohs(__be16);
-extern __be16 htons(__u16);
-
-#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
-
-#define ___htonl(x) __cpu_to_be32(x)
-#define ___htons(x) __cpu_to_be16(x)
-#define ___ntohl(x) __be32_to_cpu(x)
-#define ___ntohs(x) __be16_to_cpu(x)
-
-#define htonl(x) ___htonl(x)
-#define ntohl(x) ___ntohl(x)
-#define htons(x) ___htons(x)
-#define ntohs(x) ___ntohs(x)
-
-#endif /* OPTIMIZE */
-
-#endif /* KERNEL */
-
-
-#endif /* _LINUX_BYTEORDER_GENERIC_H */
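
The intended usage discipline is to convert exactly once at the wire boundary. A user-space sketch using the standard htonl()/ntohs() from <arpa/inet.h>; the on-wire struct is hypothetical:

    /* byteorder_demo.c - convert at the boundary where data enters or
     * leaves wire format, and nowhere else.
     */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct wire_hdr {
        uint32_t magic;     /* stored big-endian on the wire */
        uint16_t length;    /* likewise */
    };

    int main(void)
    {
        struct wire_hdr hdr;

        /* Host -> wire: convert exactly once, when filling the struct. */
        hdr.magic = htonl(0xfeedface);
        hdr.length = htons(128);

        /* Wire -> host: convert exactly once, when reading it back. */
        printf("magic=%#x length=%u\n",
               (unsigned)ntohl(hdr.magic), (unsigned)ntohs(hdr.length));
        return 0;
    }
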
diff --git a/xen/include/asm-ia64/linux/byteorder/little_endian.h b/xen/include/asm-ia64/linux/byteorder/little_endian.h
deleted file mode 100644
index 86e62b7501..0000000000
--- a/xen/include/asm-ia64/linux/byteorder/little_endian.h
+++ /dev/null
@@ -1,106 +0,0 @@
-#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H
-#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H
-
-#ifndef __LITTLE_ENDIAN
-#define __LITTLE_ENDIAN 1234
-#endif
-#ifndef __LITTLE_ENDIAN_BITFIELD
-#define __LITTLE_ENDIAN_BITFIELD
-#endif
-
-#include <linux/types.h>
-#include <linux/byteorder/swab.h>
-
-#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
-#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
-#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
-#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
-#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
-#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
-#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
-#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
-#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
-#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
-#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
-#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
-#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
-#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
-#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
-#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
-#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
-#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
-#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
-#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
-#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
-#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
-#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
-#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
-#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
-#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
-#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
-#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
-
-static inline __le64 __cpu_to_le64p(const __u64 *p)
-{
- return (__force __le64)*p;
-}
-static inline __u64 __le64_to_cpup(const __le64 *p)
-{
- return (__force __u64)*p;
-}
-static inline __le32 __cpu_to_le32p(const __u32 *p)
-{
- return (__force __le32)*p;
-}
-static inline __u32 __le32_to_cpup(const __le32 *p)
-{
- return (__force __u32)*p;
-}
-static inline __le16 __cpu_to_le16p(const __u16 *p)
-{
- return (__force __le16)*p;
-}
-static inline __u16 __le16_to_cpup(const __le16 *p)
-{
- return (__force __u16)*p;
-}
-static inline __be64 __cpu_to_be64p(const __u64 *p)
-{
- return (__force __be64)__swab64p(p);
-}
-static inline __u64 __be64_to_cpup(const __be64 *p)
-{
- return __swab64p((__u64 *)p);
-}
-static inline __be32 __cpu_to_be32p(const __u32 *p)
-{
- return (__force __be32)__swab32p(p);
-}
-static inline __u32 __be32_to_cpup(const __be32 *p)
-{
- return __swab32p((__u32 *)p);
-}
-static inline __be16 __cpu_to_be16p(const __u16 *p)
-{
- return (__force __be16)__swab16p(p);
-}
-static inline __u16 __be16_to_cpup(const __be16 *p)
-{
- return __swab16p((__u16 *)p);
-}
-#define __cpu_to_le64s(x) do {} while (0)
-#define __le64_to_cpus(x) do {} while (0)
-#define __cpu_to_le32s(x) do {} while (0)
-#define __le32_to_cpus(x) do {} while (0)
-#define __cpu_to_le16s(x) do {} while (0)
-#define __le16_to_cpus(x) do {} while (0)
-#define __cpu_to_be64s(x) __swab64s((x))
-#define __be64_to_cpus(x) __swab64s((x))
-#define __cpu_to_be32s(x) __swab32s((x))
-#define __be32_to_cpus(x) __swab32s((x))
-#define __cpu_to_be16s(x) __swab16s((x))
-#define __be16_to_cpus(x) __swab16s((x))
-
-#include <linux/byteorder/generic.h>
-
-#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
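
On a little-endian host every cpu_to_le*() above is a plain cast and every cpu_to_be*() is a swap. A stand-alone check (hypothetical macros; little-endian host and GCC/Clang builtins assumed):

    /* le_demo.c - on a little-endian host, cpu_to_le32 is an identity and
     * cpu_to_be32 is a byte swap, mirroring the macros above.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define cpu_to_le32(x) ((uint32_t)(x))         /* no-op on LE */
    #define cpu_to_be32(x) __builtin_bswap32(x)    /* swap on LE */

    int main(void)
    {
        uint32_t v = 0x12345678;

        assert(cpu_to_le32(v) == 0x12345678);
        assert(cpu_to_be32(v) == 0x78563412);
        printf("le=%#x be=%#x\n", cpu_to_le32(v), cpu_to_be32(v));
        return 0;
    }
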
diff --git a/xen/include/asm-ia64/linux/byteorder/swab.h b/xen/include/asm-ia64/linux/byteorder/swab.h
deleted file mode 100644
index 2f1cb77512..0000000000
--- a/xen/include/asm-ia64/linux/byteorder/swab.h
+++ /dev/null
@@ -1,192 +0,0 @@
-#ifndef _LINUX_BYTEORDER_SWAB_H
-#define _LINUX_BYTEORDER_SWAB_H
-
-/*
- * linux/byteorder/swab.h
- * Byte-swapping, independently from CPU endianness
- * swabXX[ps]?(foo)
- *
- * Francois-Rene Rideau <fare@tunes.org> 19971205
- * separated swab functions from cpu_to_XX,
- * to clean up support for bizarre-endian architectures.
- *
- * See asm-i386/byteorder.h and such for examples of how to provide
- * architecture-dependent optimized versions
- *
- */
-
-#include <linux/compiler.h>
-
-/* casts are necessary for constants, because we never know for sure
- * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
- */
-#define ___swab16(x) \
-({ \
- __u16 __x = (x); \
- ((__u16)( \
- (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
- (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
-})
-
-#define ___swab32(x) \
-({ \
- __u32 __x = (x); \
- ((__u32)( \
- (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
- (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
- (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
- (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
-})
-
-#define ___swab64(x) \
-({ \
- __u64 __x = (x); \
- ((__u64)( \
- (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
- (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
- (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
- (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
- (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
- (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
- (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
- (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
-})
-
-#define ___constant_swab16(x) \
- ((__u16)( \
- (((__u16)(x) & (__u16)0x00ffU) << 8) | \
- (((__u16)(x) & (__u16)0xff00U) >> 8) ))
-#define ___constant_swab32(x) \
- ((__u32)( \
- (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
- (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
- (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
- (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
-#define ___constant_swab64(x) \
- ((__u64)( \
- (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
- (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
- (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
- (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
- (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
- (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
- (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
- (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
-
-/*
- * provide defaults when no architecture-specific optimization is detected
- */
-#ifndef __arch__swab16
-# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
-#endif
-#ifndef __arch__swab32
-# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
-#endif
-#ifndef __arch__swab64
-# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
-#endif
-
-#ifndef __arch__swab16p
-# define __arch__swab16p(x) __arch__swab16(*(x))
-#endif
-#ifndef __arch__swab32p
-# define __arch__swab32p(x) __arch__swab32(*(x))
-#endif
-#ifndef __arch__swab64p
-# define __arch__swab64p(x) __arch__swab64(*(x))
-#endif
-
-#ifndef __arch__swab16s
-# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
-#endif
-#ifndef __arch__swab32s
-# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
-#endif
-#ifndef __arch__swab64s
-# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
-#endif
-
-
-/*
- * Allow constant folding
- */
-#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
-# define __swab16(x) \
-(__builtin_constant_p((__u16)(x)) ? \
- ___swab16((x)) : \
- __fswab16((x)))
-# define __swab32(x) \
-(__builtin_constant_p((__u32)(x)) ? \
- ___swab32((x)) : \
- __fswab32((x)))
-# define __swab64(x) \
-(__builtin_constant_p((__u64)(x)) ? \
- ___swab64((x)) : \
- __fswab64((x)))
-#else
-# define __swab16(x) __fswab16(x)
-# define __swab32(x) __fswab32(x)
-# define __swab64(x) __fswab64(x)
-#endif /* OPTIMIZE */
-
-
-static __inline__ __attribute_const__ __u16 __fswab16(__u16 x)
-{
- return __arch__swab16(x);
-}
-static __inline__ __u16 __swab16p(const __u16 *x)
-{
- return __arch__swab16p(x);
-}
-static __inline__ void __swab16s(__u16 *addr)
-{
- __arch__swab16s(addr);
-}
-
-static __inline__ __attribute_const__ __u32 __fswab32(__u32 x)
-{
- return __arch__swab32(x);
-}
-static __inline__ __u32 __swab32p(const __u32 *x)
-{
- return __arch__swab32p(x);
-}
-static __inline__ void __swab32s(__u32 *addr)
-{
- __arch__swab32s(addr);
-}
-
-#ifdef __BYTEORDER_HAS_U64__
-static __inline__ __attribute_const__ __u64 __fswab64(__u64 x)
-{
-# ifdef __SWAB_64_THRU_32__
- __u32 h = x >> 32;
- __u32 l = x & ((1ULL<<32)-1);
- return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
-# else
- return __arch__swab64(x);
-# endif
-}
-static __inline__ __u64 __swab64p(const __u64 *x)
-{
- return __arch__swab64p(x);
-}
-static __inline__ void __swab64s(__u64 *addr)
-{
- __arch__swab64s(addr);
-}
-#endif /* __BYTEORDER_HAS_U64__ */
-
-#if defined(__KERNEL__)
-#define swab16 __swab16
-#define swab32 __swab32
-#define swab64 __swab64
-#define swab16p __swab16p
-#define swab32p __swab32p
-#define swab64p __swab64p
-#define swab16s __swab16s
-#define swab32s __swab32s
-#define swab64s __swab64s
-#endif
-
-#endif /* _LINUX_BYTEORDER_SWAB_H */
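
The __builtin_constant_p() split above exists so constant operands fold at compile time while runtime values go through one out-of-line function. A stand-alone sketch of the same shape (hypothetical names, GCC/Clang assumed):

    /* swab_fold_demo.c - constants go through the pure macro, runtime
     * values through a function, mimicking the split above.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define ___swab32(x) \
        ((uint32_t)((((uint32_t)(x) & 0x000000ffUL) << 24) | \
                    (((uint32_t)(x) & 0x0000ff00UL) <<  8) | \
                    (((uint32_t)(x) & 0x00ff0000UL) >>  8) | \
                    (((uint32_t)(x) & 0xff000000UL) >> 24)))

    static uint32_t fswab32(uint32_t x)
    {
        return ___swab32(x);
    }

    #define swab32(x) \
        (__builtin_constant_p(x) ? ___swab32(x) : fswab32(x))

    int main(void)
    {
        volatile uint32_t rt = 0x11223344;    /* runtime value */

        /* Constant operand: the compiler can emit the literal 0x78563412. */
        printf("%#x\n", swab32(0x12345678));
        /* Runtime operand: goes through fswab32(). */
        printf("%#x\n", swab32(rt));
        return 0;
    }
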
diff --git a/xen/include/asm-ia64/linux/hash.h b/xen/include/asm-ia64/linux/hash.h
deleted file mode 100644
index acf17bb8e7..0000000000
--- a/xen/include/asm-ia64/linux/hash.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _LINUX_HASH_H
-#define _LINUX_HASH_H
-/* Fast hashing routine for a long.
- (C) 2002 William Lee Irwin III, IBM */
-
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is, operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-#if BITS_PER_LONG == 32
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
-#elif BITS_PER_LONG == 64
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
-#else
-#error Define GOLDEN_RATIO_PRIME for your wordsize.
-#endif
-
-static inline unsigned long hash_long(unsigned long val, unsigned int bits)
-{
- unsigned long hash = val;
-
-#if BITS_PER_LONG == 64
- /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
- unsigned long n = hash;
- n <<= 18;
- hash -= n;
- n <<= 33;
- hash -= n;
- n <<= 3;
- hash += n;
- n <<= 3;
- hash -= n;
- n <<= 4;
- hash += n;
- n <<= 2;
- hash += n;
-#else
- /* On some cpus multiply is faster, on others gcc will do shifts */
- hash *= GOLDEN_RATIO_PRIME;
-#endif
-
- /* High bits are more random, so use them. */
- return hash >> (BITS_PER_LONG - bits);
-}
-
-static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
-{
- return hash_long((unsigned long)ptr, bits);
-}
-#endif /* _LINUX_HASH_H */
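
The shift-and-add sequence in the 64-bit branch is claimed to reproduce a multiply by GOLDEN_RATIO_PRIME; that is easy to verify stand-alone (64-bit unsigned long assumed, hypothetical file name):

    /* hash_demo.c - confirm that the 64-bit shift/add sequence in
     * hash_long() above equals a plain multiply by GOLDEN_RATIO_PRIME
     * modulo 2^64, then bucket a sample value.
     */
    #include <assert.h>
    #include <stdio.h>

    #define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL

    static unsigned long hash_long(unsigned long val, unsigned int bits)
    {
        unsigned long hash = val;
        unsigned long n = hash;

        n <<= 18; hash -= n;
        n <<= 33; hash -= n;
        n <<= 3;  hash += n;
        n <<= 3;  hash -= n;
        n <<= 4;  hash += n;
        n <<= 2;  hash += n;
        return hash >> (64 - bits);
    }

    int main(void)
    {
        unsigned long val = 0xdeadbeefUL;

        /* Shift/add decomposition == multiply, modulo 2^64. */
        assert(hash_long(val, 64) == val * GOLDEN_RATIO_PRIME);
        /* Typical use: index into a 2^10-entry hash table. */
        printf("bucket = %lu\n", hash_long(val, 10));
        return 0;
    }
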
diff --git a/xen/include/asm-ia64/linux/initrd.h b/xen/include/asm-ia64/linux/initrd.h
deleted file mode 100644
index 55289d261b..0000000000
--- a/xen/include/asm-ia64/linux/initrd.h
+++ /dev/null
@@ -1,20 +0,0 @@
-
-#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
-
-/* 1 = load ramdisk, 0 = don't load */
-extern int rd_doload;
-
-/* 1 = prompt for ramdisk, 0 = don't prompt */
-extern int rd_prompt;
-
-/* starting block # of image */
-extern int rd_image_start;
-
-/* 1 if it is not an error if initrd_start < memory_start */
-extern int initrd_below_start_ok;
-
-/* free_initrd_mem always gets called with the next two as arguments.. */
-extern unsigned long initrd_start, initrd_end;
-extern void free_initrd_mem(unsigned long, unsigned long);
-
-extern unsigned int real_root_dev;
diff --git a/xen/include/asm-ia64/linux/ioport.h b/xen/include/asm-ia64/linux/ioport.h
deleted file mode 100644
index d42c833990..0000000000
--- a/xen/include/asm-ia64/linux/ioport.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * ioport.h Definitions of routines for detecting, reserving and
- * allocating system resources.
- *
- * Authors: Linus Torvalds
- */
-
-#ifndef _LINUX_IOPORT_H
-#define _LINUX_IOPORT_H
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-/*
- * Resources are tree-like, allowing
- * nesting etc..
- */
-struct resource {
- resource_size_t start;
- resource_size_t end;
- const char *name;
- unsigned long flags;
- struct resource *parent, *sibling, *child;
-};
-
-struct resource_list {
- struct resource_list *next;
- struct resource *res;
- struct pci_dev *dev;
-};
-
-/*
- * IO resources have these defined flags.
- */
-#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
-
-#define IORESOURCE_IO 0x00000100 /* Resource type */
-#define IORESOURCE_MEM 0x00000200
-#define IORESOURCE_IRQ 0x00000400
-#define IORESOURCE_DMA 0x00000800
-
-#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */
-#define IORESOURCE_READONLY 0x00002000
-#define IORESOURCE_CACHEABLE 0x00004000
-#define IORESOURCE_RANGELENGTH 0x00008000
-#define IORESOURCE_SHADOWABLE 0x00010000
-#define IORESOURCE_BUS_HAS_VGA 0x00080000
-
-#define IORESOURCE_DISABLED 0x10000000
-#define IORESOURCE_UNSET 0x20000000
-#define IORESOURCE_AUTO 0x40000000
-#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
-
-/* ISA PnP IRQ specific bits (IORESOURCE_BITS) */
-#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
-#define IORESOURCE_IRQ_LOWEDGE (1<<1)
-#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
-#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
-#define IORESOURCE_IRQ_SHAREABLE (1<<4)
-
-/* ISA PnP DMA specific bits (IORESOURCE_BITS) */
-#define IORESOURCE_DMA_TYPE_MASK (3<<0)
-#define IORESOURCE_DMA_8BIT (0<<0)
-#define IORESOURCE_DMA_8AND16BIT (1<<0)
-#define IORESOURCE_DMA_16BIT (2<<0)
-
-#define IORESOURCE_DMA_MASTER (1<<2)
-#define IORESOURCE_DMA_BYTE (1<<3)
-#define IORESOURCE_DMA_WORD (1<<4)
-
-#define IORESOURCE_DMA_SPEED_MASK (3<<6)
-#define IORESOURCE_DMA_COMPATIBLE (0<<6)
-#define IORESOURCE_DMA_TYPEA (1<<6)
-#define IORESOURCE_DMA_TYPEB (2<<6)
-#define IORESOURCE_DMA_TYPEF (3<<6)
-
-/* ISA PnP memory I/O specific bits (IORESOURCE_BITS) */
-#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */
-#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */
-#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */
-#define IORESOURCE_MEM_TYPE_MASK (3<<3)
-#define IORESOURCE_MEM_8BIT (0<<3)
-#define IORESOURCE_MEM_16BIT (1<<3)
-#define IORESOURCE_MEM_8AND16BIT (2<<3)
-#define IORESOURCE_MEM_32BIT (3<<3)
-#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
-#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
-
-/* PCI ROM control bits (IORESOURCE_BITS) */
-#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
-#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
-#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
-
-/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
-extern struct resource ioport_resource;
-extern struct resource iomem_resource;
-
-extern int request_resource(struct resource *root, struct resource *new);
-extern struct resource * ____request_resource(struct resource *root, struct resource *new);
-extern int release_resource(struct resource *new);
-extern int insert_resource(struct resource *parent, struct resource *new);
-extern int allocate_resource(struct resource *root, struct resource *new,
- resource_size_t size, resource_size_t min,
- resource_size_t max, resource_size_t align,
- void (*alignf)(void *, struct resource *,
- resource_size_t, resource_size_t),
- void *alignf_data);
-int adjust_resource(struct resource *res, resource_size_t start,
- resource_size_t size);
-
-/* get registered SYSTEM_RAM resources in specified area */
-extern int find_next_system_ram(struct resource *res);
-
-/* Convenience shorthand with allocation */
-#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
-#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
-#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
-
-extern struct resource * __request_region(struct resource *,
- resource_size_t start,
- resource_size_t n, const char *name);
-
-/* Compatibility cruft */
-#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
-#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n))
-#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
-
-extern int __check_region(struct resource *, resource_size_t, resource_size_t);
-extern void __release_region(struct resource *, resource_size_t,
- resource_size_t);
-
-static inline int __deprecated check_region(resource_size_t s,
- resource_size_t n)
-{
- return __check_region(&ioport_resource, s, n);
-}
-#endif /* _LINUX_IOPORT_H */
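
The core guarantee of __request_region() is that overlapping claims fail. Below is a toy user-space model of that bookkeeping; this is entirely hypothetical stand-in code, not the real resource tree, which is hierarchical:

    /* region_demo.c - toy model of busy-region bookkeeping: a claim fails
     * if it overlaps an existing one.
     */
    #include <stdio.h>

    struct region { unsigned long start, end; int used; };
    static struct region regions[8];

    static int request_region_toy(unsigned long start, unsigned long n)
    {
        unsigned long end = start + n - 1;
        int i, free_slot = -1;

        for (i = 0; i < 8; i++) {
            if (!regions[i].used) {
                free_slot = i;
                continue;
            }
            if (start <= regions[i].end && end >= regions[i].start)
                return -1;    /* overlap: range is busy */
        }
        if (free_slot < 0)
            return -1;        /* table full */
        regions[free_slot] = (struct region){ start, end, 1 };
        return 0;
    }

    int main(void)
    {
        printf("first claim:   %d\n", request_region_toy(0x1000, 0x100)); /* 0 */
        printf("overlap claim: %d\n", request_region_toy(0x1080, 0x10));  /* -1 */
        return 0;
    }
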
diff --git a/xen/include/asm-ia64/linux/jiffies.h b/xen/include/asm-ia64/linux/jiffies.h
deleted file mode 100644
index d7a2555a88..0000000000
--- a/xen/include/asm-ia64/linux/jiffies.h
+++ /dev/null
@@ -1,450 +0,0 @@
-#ifndef _LINUX_JIFFIES_H
-#define _LINUX_JIFFIES_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <asm/param.h> /* for HZ */
-#include <asm/div64.h>
-
-#ifndef div_long_long_rem
-#define div_long_long_rem(dividend,divisor,remainder) \
-({ \
- u64 result = dividend; \
- *remainder = do_div(result,divisor); \
- result; \
-})
-#endif
-
-/*
- * The following defines establish the engineering parameters of the PLL
- * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
- * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
- * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
- * nearest power of two in order to avoid hardware multiply operations.
- */
-#if HZ >= 12 && HZ < 24
-# define SHIFT_HZ 4
-#elif HZ >= 24 && HZ < 48
-# define SHIFT_HZ 5
-#elif HZ >= 48 && HZ < 96
-# define SHIFT_HZ 6
-#elif HZ >= 96 && HZ < 192
-# define SHIFT_HZ 7
-#elif HZ >= 192 && HZ < 384
-# define SHIFT_HZ 8
-#elif HZ >= 384 && HZ < 768
-# define SHIFT_HZ 9
-#elif HZ >= 768 && HZ < 1536
-# define SHIFT_HZ 10
-#else
-# error You lose.
-#endif
-
-/* LATCH is used in the interval timer and ftape setup. */
-#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-
-/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
- * improve accuracy by shifting LSH bits, hence calculating:
- * (NOM << LSH) / DEN
- * This however means trouble for large NOM, because (NOM << LSH) may no
- * longer fit in 32 bits. The following way of calculating this gives us
- * some slack, under the following conditions:
- * - (NOM / DEN) fits in (32 - LSH) bits.
- * - (NOM % DEN) fits in (32 - LSH) bits.
- */
-#define SH_DIV(NOM,DEN,LSH) ( ((NOM / DEN) << LSH) \
- + (((NOM % DEN) << LSH) + DEN / 2) / DEN)
-
-/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
-#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
-
-/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
-#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
-
-/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
-#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-
-/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
-/* a value TUSEC for TICK_USEC (can be set by adjtimex) */
-#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
-
-/* some architectures have a small-data section that can be accessed register-relative
- * but that can only take up to, say, 4-byte variables. jiffies being part of
- * an 8-byte variable may not be correctly accessed unless we force the issue
- */
-#define __jiffy_data __attribute__((section(".data")))
-
-/*
- * The 64-bit value is not volatile - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
- * get_jiffies_64() will do this for you as appropriate.
- */
-extern u64 __jiffy_data jiffies_64;
-extern unsigned long volatile __jiffy_data jiffies;
-
-#if (BITS_PER_LONG < 64)
-u64 get_jiffies_64(void);
-#else
-static inline u64 get_jiffies_64(void)
-{
- return (u64)jiffies;
-}
-#endif
-
-/*
- * These inlines deal with timer wrapping correctly. You are
- * strongly encouraged to use them
- * 1. Because people otherwise forget
- * 2. Because if the timer wrap changes in future you won't have to
- * alter your driver code.
- *
- * time_after(a,b) returns true if the time a is after time b.
- *
- * Do this with "<0" and ">=0" to only test the sign of the result. A
- * good compiler would generate better code (and a really good compiler
- * wouldn't care). Gcc is currently neither.
- */
-#define time_after(a,b) \
- (typecheck(unsigned long, a) && \
- typecheck(unsigned long, b) && \
- ((long)(b) - (long)(a) < 0))
-#define time_before(a,b) time_after(b,a)
-
-#define time_after_eq(a,b) \
- (typecheck(unsigned long, a) && \
- typecheck(unsigned long, b) && \
- ((long)(a) - (long)(b) >= 0))
-#define time_before_eq(a,b) time_after_eq(b,a)
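
The signed-difference trick is what makes these comparisons survive the jiffies wrap; a stand-alone check (hypothetical user-space rendition):

    /* time_after_demo.c - show that the signed-difference comparison above
     * stays correct across the jiffies wrap.
     */
    #include <assert.h>
    #include <stdio.h>

    #define time_after(a, b) ((long)(b) - (long)(a) < 0)

    int main(void)
    {
        unsigned long before_wrap = ~0UL - 5;    /* just before wrap */
        unsigned long after_wrap = 10;           /* just after wrap */

        /* Naive a > b gets this wrong; the signed difference does not. */
        assert(time_after(after_wrap, before_wrap));
        assert(!(after_wrap > before_wrap));
        printf("wrap handled correctly\n");
        return 0;
    }
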
-
-/*
- * Have the 32 bit jiffies value wrap 5 minutes after boot
- * so jiffies wrap bugs show up earlier.
- */
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
-
-/*
- * Change timeval to jiffies, trying to avoid the
- * most obvious overflows..
- *
- * And some not so obvious.
- *
- * Note that we don't want to return MAX_LONG, because
- * for various timeout reasons we often end up having
- * to wait "jiffies+1" in order to guarantee that we wait
- * at _least_ "jiffies" - so "jiffies+1" had better still
- * be positive.
- */
-#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
-
-/*
- * We want to do realistic conversions of time so we need to use the same
- * values the update wall clock code uses as the jiffies size. This value
- * is: TICK_NSEC (which is defined in timex.h). This
- * is a constant and is in nanoseconds. We will use scaled math
- * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and
- * NSEC_JIFFIE_SC. Note that these defines contain nothing but
- * constants and so are computed at compile time. SHIFT_HZ (computed in
- * timex.h) adjusts the scaling for different HZ values.
-
- * Scaled math??? What is that?
- *
- * Scaled math is a way to do integer math on values that would,
- * otherwise, either overflow, underflow, or cause undesired div
- * instructions to appear in the execution path. In short, we "scale"
- * up the operands so they take more bits (more precision, less
- * underflow), do the desired operation and then "scale" the result back
- * by the same amount. If we do the scaling by shifting we avoid the
- * costly mpy and the dastardly div instructions.
-
- * Suppose, for example, we want to convert from seconds to jiffies
- * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The
- * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We
- * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
- * might calculate at compile time, however, the result will only have
- * about 3-4 bits of precision (less for smaller values of HZ).
- *
- * So, we scale as follows:
- * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
- * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
- * Then we make SCALE a power of two so:
- * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
- * Now we define:
- * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE))
- * jiff = (sec * SEC_CONV) >> SCALE;
- *
- * Often the math we use will expand beyond 32-bits so we tell C how to
- * do this and pass the 64-bit result of the mpy through the ">> SCALE"
- * which should take the result back to 32-bits. We want this expansion
- * to capture as much precision as possible. At the same time we don't
- * want to overflow so we pick the SCALE to avoid this. In this file,
- * that means using a different scale for each range of HZ values (as
- * defined in timex.h).
- *
- * For those who want to know, gcc will give a 64-bit result from a "*"
- * operator if the result is a long long AND at least one of the
- * operands is cast to long long (usually just prior to the "*" so as
- * not to confuse it into thinking it really has a 64-bit operand,
- * which, by the way, it can do, but it takes more code and at least 2
- * mpys).
-
- * We also need to be aware that one second in nanoseconds is only a
- * couple of bits away from overflowing a 32-bit word, so we MUST use
- * 64-bits to get the full range time in nanoseconds.
-
- */
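
As a concrete instance of the scheme just described, here is a stand-alone sketch specialised to HZ = 1000, where TICK_NSEC is 1000000 and SEC_JIFFIE_SC works out to 22 (hypothetical rendition):

    /* scaled_demo.c - the seconds-to-jiffies scaled multiply described
     * above, specialised to HZ = 1000 (TICK_NSEC = 1000000 ns).
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TICK_NSEC     1000000ULL       /* ns per jiffy at HZ=1000 */
    #define NSEC_PER_SEC  1000000000ULL
    #define SEC_JIFFIE_SC 22               /* 32 - SHIFT_HZ for HZ=1000 */
    #define SEC_CONVERSION \
        ((unsigned long)(((NSEC_PER_SEC << SEC_JIFFIE_SC) + TICK_NSEC - 1) \
                         / TICK_NSEC))

    int main(void)
    {
        unsigned long sec = 5;
        unsigned long jiff = ((uint64_t)sec * SEC_CONVERSION) >> SEC_JIFFIE_SC;

        assert(jiff == 5000);    /* 5 s == 5000 jiffies at HZ=1000 */
        printf("%lu s -> %lu jiffies\n", sec, jiff);
        return 0;
    }
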
-
-/*
- * Here are the scales we will use. One for seconds, nanoseconds and
- * microseconds.
- *
- * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
- * check if the sign bit is set. If not, we bump the shift count by 1.
- * (Gets an extra bit of precision where we can use it.)
- * We know it is set for HZ = 1024 and HZ = 100, not for 1000.
- * Haven't tested others.
-
- * The limits of cpp (for #if expressions) allow only long (no long long), but
- * then we only need the most significant bit.
- */
-
-#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
-#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
-#undef SEC_JIFFIE_SC
-#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
-#endif
-#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
-#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-
-#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION \
- ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion. See there
- * for more details. It is the scaled resolution rounding value. Note
- * that it is a 64-bit value. Since, when it is applied, we are already
- * in jiffies (albeit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
-/*
- * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
- * into seconds. The 64-bit case will overflow if we are not careful,
- * so use the messy SH_DIV macro to do it. Still all constants.
- */
-#if BITS_PER_LONG < 64
-# define MAX_SEC_IN_JIFFIES \
- (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
-#else /* take care of overflow on 64 bits machines */
-# define MAX_SEC_IN_JIFFIES \
- (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
-
-#endif
-
-/*
- * Convert jiffies to milliseconds and back.
- *
- * Avoid unnecessary multiplications/divisions in the
- * two most common HZ cases:
- */
-static inline unsigned int jiffies_to_msecs(const unsigned long j)
-{
-#if HZ <= 1000 && !(1000 % HZ)
- return (1000 / HZ) * j;
-#elif HZ > 1000 && !(HZ % 1000)
- return (j + (HZ / 1000) - 1)/(HZ / 1000);
-#else
- return (j * 1000) / HZ;
-#endif
-}
-
-static inline unsigned int jiffies_to_usecs(const unsigned long j)
-{
-#if HZ <= 1000000 && !(1000000 % HZ)
- return (1000000 / HZ) * j;
-#elif HZ > 1000000 && !(HZ % 1000000)
- return (j + (HZ / 1000000) - 1)/(HZ / 1000000);
-#else
- return (j * 1000000) / HZ;
-#endif
-}
-
-static inline unsigned long msecs_to_jiffies(const unsigned int m)
-{
- if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-#if HZ <= 1000 && !(1000 % HZ)
- return (m + (1000 / HZ) - 1) / (1000 / HZ);
-#elif HZ > 1000 && !(HZ % 1000)
- return m * (HZ / 1000);
-#else
- return (m * HZ + 999) / 1000;
-#endif
-}
-
-static inline unsigned long usecs_to_jiffies(const unsigned int u)
-{
- if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-#if HZ <= 1000000 && !(1000000 % HZ)
- return (u + (1000000 / HZ) - 1) / (1000000 / HZ);
-#elif HZ > 1000000 && !(HZ % 1000000)
- return u * (HZ / 1000000);
-#else
- return (u * HZ + 999999) / 1000000;
-#endif
-}
-
-/*
- * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
- * that a remainder subtract here would not do the right thing as the
- * resolution values don't fall on second boundaries. I.e. the line:
- * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
- *
- * Rather, we just shift the bits off the right.
- *
- * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
- * value to a scaled second value.
- */
-static __inline__ unsigned long
-timespec_to_jiffies(const struct timespec *value)
-{
- unsigned long sec = value->tv_sec;
- long nsec = value->tv_nsec + TICK_NSEC - 1;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- nsec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)nsec * NSEC_CONVERSION) >>
- (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-
-}
-
-static __inline__ void
-jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
-{
- /*
- * Convert jiffies to nanoseconds and separate with
- * one divide.
- */
- u64 nsec = (u64)jiffies * TICK_NSEC;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
-}
-
-/* Same for "timeval"
- *
- * Well, almost. The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in microseconds.
- * Also for some machines (those that use HZ = 1024, in particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
- */
-static __inline__ unsigned long
-timeval_to_jiffies(const struct timeval *value)
-{
- unsigned long sec = value->tv_sec;
- long usec = value->tv_usec;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- usec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
- (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-}
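The USEC_ROUND trick is the fixed-point form of the familiar (x + d - 1) / d round-up: adding (1 << SC) - 1 before shifting right by SC yields a ceiling instead of a truncation. A tiny self-checking sketch with an arbitrary 8-bit scale:

#include <assert.h>
#include <stdint.h>

#define SC    8
#define ROUND ((1ULL << SC) - 1)        /* plays the role of USEC_ROUND */

int main(void)
{
    uint64_t x = (3ULL << SC) + 1;      /* just over 3.0 in fixed point */
    assert(((x + ROUND) >> SC) == 4);   /* add-then-shift rounds up */
    assert((x >> SC) == 3);             /* bare shift truncates */
    return 0;
}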
-
-static __inline__ void
-jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
-{
- /*
- * Convert jiffies to nanoseconds and separate with
- * one divide.
- */
- u64 nsec = (u64)jiffies * TICK_NSEC;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
- value->tv_usec /= NSEC_PER_USEC;
-}
-
-/*
- * Convert jiffies/jiffies_64 to clock_t and back.
- */
-static inline clock_t jiffies_to_clock_t(long x)
-{
-#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
- return x / (HZ / USER_HZ);
-#else
- u64 tmp = (u64)x * TICK_NSEC;
- do_div(tmp, (NSEC_PER_SEC / USER_HZ));
- return (long)tmp;
-#endif
-}
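Concretely: the fast path applies when the jiffy length is a whole multiple of the clock_t tick, e.g. HZ = USER_HZ = 100; with HZ = 1000 and USER_HZ = 100 a jiffy is only a tenth of a clock_t tick, so the conversion routes through 64-bit nanoseconds. A run-time sketch of both paths, with illustrative constants and TICK_NSEC approximated as NSEC_PER_SEC / HZ (the real value is derived from the clock tick rate):

#include <stdint.h>

#define DEMO_USER_HZ      100
#define DEMO_NSEC_PER_SEC 1000000000ULL

static long jiffies_to_clock_t_demo(long x, unsigned long hz)
{
    uint64_t tick_nsec = DEMO_NSEC_PER_SEC / hz;   /* idealised TICK_NSEC */

    if (tick_nsec % (DEMO_NSEC_PER_SEC / DEMO_USER_HZ) == 0)
        return x / (long)(hz / DEMO_USER_HZ);      /* HZ = 100: x / 1 */
    return (long)((uint64_t)x * tick_nsec /
                  (DEMO_NSEC_PER_SEC / DEMO_USER_HZ)); /* HZ = 1000: x / 10 */
}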
-
-static inline unsigned long clock_t_to_jiffies(unsigned long x)
-{
-#if (HZ % USER_HZ)==0
- if (x >= ~0UL / (HZ / USER_HZ))
- return ~0UL;
- return x * (HZ / USER_HZ);
-#else
- u64 jif;
-
- /* Don't worry about loss of precision here .. */
- if (x >= ~0UL / HZ * USER_HZ)
- return ~0UL;
-
- /* .. but do try to contain it here */
- jif = x * (u64) HZ;
- do_div(jif, USER_HZ);
- return jif;
-#endif
-}
-
-static inline u64 jiffies_64_to_clock_t(u64 x)
-{
-#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
- do_div(x, HZ / USER_HZ);
-#else
- /*
- * There are better ways that don't overflow early,
- * but even this doesn't overflow in hundreds of years
- * in 64 bits, so..
- */
- x *= TICK_NSEC;
- do_div(x, (NSEC_PER_SEC / USER_HZ));
-#endif
- return x;
-}
-
-static inline u64 nsec_to_clock_t(u64 x)
-{
-#if (NSEC_PER_SEC % USER_HZ) == 0
- do_div(x, (NSEC_PER_SEC / USER_HZ));
-#elif (USER_HZ % 512) == 0
- x *= USER_HZ/512;
- do_div(x, (NSEC_PER_SEC / 512));
-#else
- /*
- * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
- * overflow after 64.99 years.
- * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
- */
- x *= 9;
- do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
- / USER_HZ));
-#endif
- return x;
-}
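A worked instance of the middle branch: NSEC_PER_SEC is not divisible by USER_HZ = 1024, but both are divisible by 512, so cancelling a factor of 512 from each side keeps the arithmetic exact. One second of nanoseconds then converts to exactly 1024 ticks:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t x = 1000000000;      /* one second, in nanoseconds */

    x *= 1024 / 512;              /* x *= USER_HZ / 512 */
    x /= 1000000000 / 512;        /* NSEC_PER_SEC / 512 == 1953125, exact */
    assert(x == 1024);            /* one second == USER_HZ clock ticks */
    return 0;
}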
-
-#endif
diff --git a/xen/include/asm-ia64/linux/klist.h b/xen/include/asm-ia64/linux/klist.h
deleted file mode 100644
index 74071254c9..0000000000
--- a/xen/include/asm-ia64/linux/klist.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * klist.h - Some generic list helpers, extending struct list_head a bit.
- *
- * Implementations are found in lib/klist.c
- *
- *
- * Copyright (C) 2005 Patrick Mochel
- *
- * This file is released under the GPL v2.
- */
-
-#ifndef _LINUX_KLIST_H
-#define _LINUX_KLIST_H
-
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-
-struct klist_node;
-struct klist {
- spinlock_t k_lock;
- struct list_head k_list;
- void (*get)(struct klist_node *);
- void (*put)(struct klist_node *);
-};
-
-
-extern void klist_init(struct klist * k, void (*get)(struct klist_node *),
- void (*put)(struct klist_node *));
-
-struct klist_node {
- struct klist * n_klist;
- struct list_head n_node;
- struct kref n_ref;
- struct completion n_removed;
-};
-
-extern void klist_add_tail(struct klist_node * n, struct klist * k);
-extern void klist_add_head(struct klist_node * n, struct klist * k);
-
-extern void klist_del(struct klist_node * n);
-extern void klist_remove(struct klist_node * n);
-
-extern int klist_node_attached(struct klist_node * n);
-
-
-struct klist_iter {
- struct klist * i_klist;
- struct list_head * i_head;
- struct klist_node * i_cur;
-};
-
-
-extern void klist_iter_init(struct klist * k, struct klist_iter * i);
-extern void klist_iter_init_node(struct klist * k, struct klist_iter * i,
- struct klist_node * n);
-extern void klist_iter_exit(struct klist_iter * i);
-extern struct klist_node * klist_next(struct klist_iter * i);
-
-#endif
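For reference, a usage sketch of the API above (a kernel-style fragment, not a standalone program; struct my_dev and the get/put callbacks are hypothetical, and container_of comes from the kernel's common headers): nodes are embedded in a containing object, the list is walked with the iterator, and klist pins the current node through the get()/put() callbacks so it cannot vanish mid-walk.

/* hypothetical containing object and callbacks */
struct my_dev {
    struct klist_node node;
    int id;
};

static void my_get(struct klist_node *n) { /* pin the containing object */ }
static void my_put(struct klist_node *n) { /* unpin it */ }

static struct klist my_devs;

static void my_devs_setup(void)
{
    klist_init(&my_devs, my_get, my_put);
}

static void my_devs_add(struct my_dev *d)
{
    klist_add_tail(&d->node, &my_devs);
}

static void my_devs_walk(void)
{
    struct klist_iter i;
    struct klist_node *n;

    klist_iter_init(&my_devs, &i);
    while ((n = klist_next(&i)) != NULL) {
        struct my_dev *d = container_of(n, struct my_dev, node);
        /* ... use d->id; the iterator holds a reference on n ... */
    }
    klist_iter_exit(&i);
}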
diff --git a/xen/include/asm-ia64/linux/kmalloc_sizes.h b/xen/include/asm-ia64/linux/kmalloc_sizes.h
deleted file mode 100644
index d82d4c05c1..0000000000
--- a/xen/include/asm-ia64/linux/kmalloc_sizes.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#if (PAGE_SIZE == 4096)
- CACHE(32)
-#endif
- CACHE(64)
-#if L1_CACHE_BYTES < 64
- CACHE(96)
-#endif
- CACHE(128)
-#if L1_CACHE_BYTES < 128
- CACHE(192)
-#endif
- CACHE(256)
- CACHE(512)
- CACHE(1024)
- CACHE(2048)
- CACHE(4096)
- CACHE(8192)
- CACHE(16384)
- CACHE(32768)
- CACHE(65536)
- CACHE(131072)
-#ifndef CONFIG_MMU
- CACHE(262144)
- CACHE(524288)
- CACHE(1048576)
-#ifdef CONFIG_LARGE_ALLOCS
- CACHE(2097152)
- CACHE(4194304)
- CACHE(8388608)
- CACHE(16777216)
- CACHE(33554432)
-#endif /* CONFIG_LARGE_ALLOCS */
-#endif /* CONFIG_MMU */
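This header is an "x-macro" table: it deliberately has no include guard and never defines CACHE() itself. Each consumer defines CACHE to whatever expansion it needs, includes the file so the size list is stamped out once per entry, then undefines it. A hedged sketch of the pattern, with illustrative struct and array names roughly in the style of the slab allocator:

/* each consumer defines CACHE(), stamps out the table, and cleans up */
struct cache_size_demo {
    unsigned long cs_size;
};

static struct cache_size_demo malloc_sizes_demo[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
#undef CACHE
};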
diff --git a/xen/include/asm-ia64/linux/kref.h b/xen/include/asm-ia64/linux/kref.h
deleted file mode 100644
index 6fee353989..0000000000
--- a/xen/include/asm-ia64/linux/kref.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * kref.h - library routines for handling generic reference counted objects
- *
- * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
- * Copyright (C) 2004 IBM Corp.
- *
- * based on kobject.h which was:
- * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
- * Copyright (C) 2002-2003 Open Source Development Labs
- *
- * This file is released under the GPLv2.
- *
- */
-
-#ifndef _KREF_H_
-#define _KREF_H_
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <asm/atomic.h>
-
-struct kref {
- atomic_t refcount;
-};
-
-void kref_init(struct kref *kref);
-void kref_get(struct kref *kref);
-int kref_put(struct kref *kref, void (*release) (struct kref *kref));
-
-#endif /* __KERNEL__ */
-#endif /* _KREF_H_ */
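A usage sketch for the three calls above (a kernel-style fragment; struct my_obj and its helpers are hypothetical): embed the kref, initialise it to a count of one, and free the containing object from the release callback handed to kref_put().

struct my_obj {
    struct kref ref;
    /* ... payload ... */
};

static void my_obj_release(struct kref *kref)
{
    struct my_obj *obj = container_of(kref, struct my_obj, ref);
    kfree(obj);
}

static struct my_obj *my_obj_alloc(void)
{
    struct my_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
    if (obj)
        kref_init(&obj->ref);               /* count starts at 1 */
    return obj;
}

static void my_obj_put(struct my_obj *obj)
{
    kref_put(&obj->ref, my_obj_release);    /* release runs on last put */
}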
diff --git a/xen/include/asm-ia64/linux/linkage.h b/xen/include/asm-ia64/linux/linkage.h
deleted file mode 100644
index 338f7795d8..0000000000
--- a/xen/include/asm-ia64/linux/linkage.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef _LINUX_LINKAGE_H
-#define _LINUX_LINKAGE_H
-
-#include <linux/config.h>
-#include <asm/linkage.h>
-
-#ifdef __cplusplus
-#define CPP_ASMLINKAGE extern "C"
-#else
-#define CPP_ASMLINKAGE
-#endif
-
-#ifndef asmlinkage
-#define asmlinkage CPP_ASMLINKAGE
-#endif
-
-#ifndef prevent_tail_call
-# define prevent_tail_call(ret) do { } while (0)
-#endif
-
-#ifndef __ALIGN
-#define __ALIGN .align 4,0x90
-#define __ALIGN_STR ".align 4,0x90"
-#endif
-
-#ifdef __ASSEMBLY__
-
-#define ALIGN __ALIGN
-#define ALIGN_STR __ALIGN_STR
-
-#define ENTRY(name) \
- .globl name; \
- ALIGN; \
- name:
-
-#endif
-
-#define NORET_TYPE /**/
-#define ATTRIB_NORET __attribute__((noreturn))
-#define NORET_AND noreturn,
-
-#ifndef FASTCALL
-#define FASTCALL(x) x
-#define fastcall
-#endif
-
-#endif
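Concretely, ENTRY() above turns a name into an exported, aligned assembly label. With the default __ALIGN shown, an assembly file containing

    ENTRY(my_func)

expands, token for token, to

    .globl my_func;
    .align 4,0x90;
    my_func:

(my_func is a hypothetical name; the #ifndef guards above exist precisely so an architecture can supply its own __ALIGN and asmlinkage definitions).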
diff --git a/xen/include/asm-ia64/linux/mod_devicetable.h b/xen/include/asm-ia64/linux/mod_devicetable.h
deleted file mode 100644
index e0c393cc72..0000000000
--- a/xen/include/asm-ia64/linux/mod_devicetable.h
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Device tables which are exported to userspace via
- * scripts/mod/file2alias.c. You must keep that file in sync with this
- * header.
- */
-
-#ifndef LINUX_MOD_DEVICETABLE_H
-#define LINUX_MOD_DEVICETABLE_H
-
-#ifdef __KERNEL__
-#include <linux/types.h>
-typedef unsigned long kernel_ulong_t;
-#endif
-
-#define PCI_ANY_ID (~0)
-
-struct pci_device_id {
- __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID */
- __u32 subvendor, subdevice; /* Subsystem IDs or PCI_ANY_ID */
- __u32 class, class_mask; /* (class,subclass,prog-if) triplet */
- kernel_ulong_t driver_data; /* Data private to the driver */
-};
-
-
-#define IEEE1394_MATCH_VENDOR_ID 0x0001
-#define IEEE1394_MATCH_MODEL_ID 0x0002
-#define IEEE1394_MATCH_SPECIFIER_ID 0x0004
-#define IEEE1394_MATCH_VERSION 0x0008
-
-struct ieee1394_device_id {
- __u32 match_flags;
- __u32 vendor_id;
- __u32 model_id;
- __u32 specifier_id;
- __u32 version;
- kernel_ulong_t driver_data
- __attribute__((aligned(sizeof(kernel_ulong_t))));
-};
-
-
-/*
- * Device table entry for "new style" table-driven USB drivers.
- * User mode code can read these tables to choose which modules to load.
- * Declare the table as a MODULE_DEVICE_TABLE.
- *
- * A probe() parameter will point to a matching entry from this table.
- * Use the driver_info field for each match to hold information tied
- * to that match: device quirks, etc.
- *
- * Terminate the driver's table with an all-zeroes entry.
- * Use the flag values to control which fields are compared.
- */
-
-/**
- * struct usb_device_id - identifies USB devices for probing and hotplugging
- * @match_flags: Bit mask controlling which of the other fields are used to match
- * against new devices. Any field except for driver_info may be used,
- * although some only make sense in conjunction with other fields.
- * This is usually set by a USB_DEVICE_*() macro, which sets all
- * other fields in this structure except for driver_info.
- * @idVendor: USB vendor ID for a device; numbers are assigned
- * by the USB forum to its members.
- * @idProduct: Vendor-assigned product ID.
- * @bcdDevice_lo: Low end of range of vendor-assigned product version numbers.
- * This is also used to identify individual product versions, for
- * a range consisting of a single device.
- * @bcdDevice_hi: High end of version number range. The range of product
- * versions is inclusive.
- * @bDeviceClass: Class of device; numbers are assigned
- * by the USB forum. Products may choose to implement classes,
- * or be vendor-specific. Device classes specify behavior of all
- * the interfaces on a device.
- * @bDeviceSubClass: Subclass of device; associated with bDeviceClass.
- * @bDeviceProtocol: Protocol of device; associated with bDeviceClass.
- * @bInterfaceClass: Class of interface; numbers are assigned
- * by the USB forum. Products may choose to implement classes,
- * or be vendor-specific. Interface classes specify behavior only
- * of a given interface; other interfaces may support other classes.
- * @bInterfaceSubClass: Subclass of interface; associated with bInterfaceClass.
- * @bInterfaceProtocol: Protocol of interface; associated with bInterfaceClass.
- * @driver_info: Holds information used by the driver. Usually it holds
- * a pointer to a descriptor understood by the driver, or perhaps
- * device flags.
- *
- * In most cases, drivers will create a table of device IDs by using
- * USB_DEVICE(), or similar macros designed for that purpose.
- * They will then export it to userspace using MODULE_DEVICE_TABLE(),
- * and provide it to the USB core through their usb_driver structure.
- *
- * See the usb_match_id() function for information about how matches are
- * performed. Briefly, you will normally use one of several macros to help
- * construct these entries. Each entry you provide will either identify
- * one or more specific products, or will identify a class of products
- * which have agreed to behave the same. You should put the more specific
- * matches towards the beginning of your table, so that driver_info can
- * record quirks of specific products.
- */
-struct usb_device_id {
- /* which fields to match against? */
- __u16 match_flags;
-
- /* Used for product specific matches; range is inclusive */
- __u16 idVendor;
- __u16 idProduct;
- __u16 bcdDevice_lo;
- __u16 bcdDevice_hi;
-
- /* Used for device class matches */
- __u8 bDeviceClass;
- __u8 bDeviceSubClass;
- __u8 bDeviceProtocol;
-
- /* Used for interface class matches */
- __u8 bInterfaceClass;
- __u8 bInterfaceSubClass;
- __u8 bInterfaceProtocol;
-
- /* not matched against */
- kernel_ulong_t driver_info;
-};
-
-/* Some useful macros to use to create struct usb_device_id */
-#define USB_DEVICE_ID_MATCH_VENDOR 0x0001
-#define USB_DEVICE_ID_MATCH_PRODUCT 0x0002
-#define USB_DEVICE_ID_MATCH_DEV_LO 0x0004
-#define USB_DEVICE_ID_MATCH_DEV_HI 0x0008
-#define USB_DEVICE_ID_MATCH_DEV_CLASS 0x0010
-#define USB_DEVICE_ID_MATCH_DEV_SUBCLASS 0x0020
-#define USB_DEVICE_ID_MATCH_DEV_PROTOCOL 0x0040
-#define USB_DEVICE_ID_MATCH_INT_CLASS 0x0080
-#define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
-#define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
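Putting the struct and flags together, a driver table typically looks like the hedged sketch below. USB_DEVICE() is the convenience macro the kernel-doc above refers to; it lives alongside the USB core rather than in this header, and fills in idVendor/idProduct while setting USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT. The vendor/product numbers are made-up placeholders:

static const struct usb_device_id demo_id_table[] = {
    /* one specific product, matched by vendor and product ID */
    { USB_DEVICE(0x1234, 0x5678) },
    /* any device exposing a mass-storage (class 8) interface */
    { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
      .bInterfaceClass = 8 },
    { }   /* all-zeroes terminator, as the comment above requires */
};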
-
-/* s390 CCW devices */
-struct ccw_device_id {
- __u16 match_flags; /* which fields to match against */
-
- __u16 cu_type; /* control unit type */
- __u16 dev_type; /* device type */
- __u8 cu_model; /* control unit model */
- __u8 dev_model; /* device model */
-
- kernel_ulong_t driver_info;
-};
-
-#define CCW_DEVICE_ID_MATCH_CU_TYPE 0x01
-#define CCW_DEVICE_ID_MATCH_CU_MODEL 0x02
-#define CCW_DEVICE_ID_MATCH_DEVICE_TYPE 0x04
-#define CCW_DEVICE_ID_MATCH_DEVICE_MODEL 0x08
-
-/* s390 AP bus devices */
-struct ap_device_id {
- __u16 match_flags; /* which fields to match against */
- __u8 dev_type; /* device type */
- __u8 pad1;
- __u32 pad2;
- kernel_ulong_t driver_info;
-};
-
-#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01
-
-
-#define PNP_ID_LEN 8
-#define PNP_MAX_DEVICES 8
-
-struct pnp_device_id {
- __u8 id[PNP_ID_LEN];
- kernel_ulong_t driver_data;
-};
-
-struct pnp_card_device_id {
- __u8 id[PNP_ID_LEN];
- kernel_ulong_t driver_data;
- struct {
- __u8 id[PNP_ID_LEN];
- } devs[PNP_MAX_DEVICES];
-};
-
-
-#define SERIO_ANY 0xff
-
-struct serio_device_id {
- __u8 type;
- __u8 extra;
- __u8 id;
- __u8 proto;
-};
-
-/*
- * Struct used for matching a device
- */
-struct of_device_id
-{
- char name[32];
- char type[32];
- char compatible[128];
-#ifdef __KERNEL__
- void *data;
-#else
- kernel_ulong_t data;
-#endif
-};
-
-/* VIO */
-struct vio_device_id {
- char type[32];
- char compat[32];
-};
-
-/* PCMCIA */
-
-struct pcmcia_device_id {
- __u16 match_flags;
-
- __u16 manf_id;
- __u16 card_id;
-
- __u8 func_id;
-
- /* for real multi-function devices */
- __u8 function;
-
- /* for pseudo multi-function devices */
- __u8 device_no;
-
- __u32 prod_id_hash[4]
- __attribute__((aligned(sizeof(__u32))));
-
- /* not matched against in kernelspace */
-#ifdef __KERNEL__
- const char * prod_id[4];
-#else
- kernel_ulong_t prod_id[4]
- __attribute__((aligned(sizeof(kernel_ulong_t))));
-#endif
-
- /* not matched against */
- kernel_ulong_t driver_info;
-#ifdef __KERNEL__
- char * cisfile;
-#else
- kernel_ulong_t cisfile;
-#endif
-};
-
-#define PCMCIA_DEV_ID_MATCH_MANF_ID 0x0001
-#define PCMCIA_DEV_ID_MATCH_CARD_ID 0x0002
-#define PCMCIA_DEV_ID_MATCH_FUNC_ID 0x0004
-#define PCMCIA_DEV_ID_MATCH_FUNCTION 0x0008
-#define PCMCIA_DEV_ID_MATCH_PROD_ID1 0x0010
-#define PCMCIA_DEV_ID_MATCH_PROD_ID2 0x0020
-#define PCMCIA_DEV_ID_MATCH_PROD_ID3 0x0040
-#define PCMCIA_DEV_ID_MATCH_PROD_ID4 0x0080
-#define PCMCIA_DEV_ID_MATCH_DEVICE_NO 0x0100
-#define PCMCIA_DEV_ID_MATCH_FAKE_CIS 0x0200
-#define PCMCIA_DEV_ID_MATCH_ANONYMOUS 0x0400
-
-/* I2C */
-struct i2c_device_id {
- __u16 id;
-};
-
-/* Input */
-#define INPUT_DEVICE_ID_EV_MAX 0x1f
-#define INPUT_DEVICE_ID_KEY_MAX 0x1ff
-#define INPUT_DEVICE_ID_REL_MAX 0x0f
-#define INPUT_DEVICE_ID_ABS_MAX 0x3f
-#define INPUT_DEVICE_ID_MSC_MAX 0x07
-#define INPUT_DEVICE_ID_LED_MAX 0x0f
-#define INPUT_DEVICE_ID_SND_MAX 0x07
-#define INPUT_DEVICE_ID_FF_MAX 0x7f
-#define INPUT_DEVICE_ID_SW_MAX 0x0f
-
-#define INPUT_DEVICE_ID_MATCH_BUS 1
-#define INPUT_DEVICE_ID_MATCH_VENDOR 2
-#define INPUT_DEVICE_ID_MATCH_PRODUCT 4
-#define INPUT_DEVICE_ID_MATCH_VERSION 8
-
-#define INPUT_DEVICE_ID_MATCH_EVBIT 0x0010
-#define INPUT_DEVICE_ID_MATCH_KEYBIT 0x0020
-#define INPUT_DEVICE_ID_MATCH_RELBIT 0x0040
-#define INPUT_DEVICE_ID_MATCH_ABSBIT 0x0080
-#define INPUT_DEVICE_ID_MATCH_MSCIT 0x0100
-#define INPUT_DEVICE_ID_MATCH_LEDBIT 0x0200
-#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400
-#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800
-#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000
-
-struct input_device_id {
-
- kernel_ulong_t flags;
-
- __u16 bustype;
- __u16 vendor;
- __u16 product;
- __u16 version;
-
- kernel_ulong_t evbit[INPUT_DEVICE_ID_EV_MAX / BITS_PER_LONG + 1];
- kernel_ulong_t keybit[INPUT_DEVICE_ID_KEY_MAX / BITS_PER_LONG + 1];
- kernel_ulong_t relbit[INPUT_DEVICE_ID_REL_MAX / BITS_PER_LONG + 1];
- kernel_ulong_t absbit[INPUT_DEVICE_ID_ABS_MAX / BITS_PER_LONG + 1];
- kernel_ulong_t mscbit[INPUT_DEVICE_ID_MSC_MAX / BITS_PER_LONG + 1];
- kernel_ulong_t ledbit[INPUT_DEVICE_ID_LED_MAX / BITS_PER_LONG + 1];
- kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
- kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
- kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
-
- kernel_ulong_t driver_info;
-};
-
-/* EISA */
-
-#define EISA_SIG_LEN 8
-
-/* The EISA signature, in ASCII form, null terminated */
-struct eisa_device_id {
- char sig[EISA_SIG_LEN];
- kernel_ulong_t driver_data;
-};
-
-#define EISA_DEVICE_MODALIAS_FMT "eisa:s%s"
-
-#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/xen/include/asm-ia64/linux/notifier.h b/xen/include/asm-ia64/linux/notifier.h
deleted file mode 100644
index 5937dd6053..0000000000
--- a/xen/include/asm-ia64/linux/notifier.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Routines to manage notifier chains for passing status changes to any
- * interested routines. We need this instead of hard coded call lists so
- * that modules can poke their nose into the innards. The network devices
- * needed them so here they are for the rest of you.
- *
- * Alan Cox <Alan.Cox@linux.org>
- */
-
-#ifndef _LINUX_NOTIFIER_H
-#define _LINUX_NOTIFIER_H
-#include <linux/errno.h>
-
-struct notifier_block
-{
- int (*notifier_call)(struct notifier_block *self, unsigned long, void *);
- struct notifier_block *next;
- int priority;
-};
-
-
-#ifdef __KERNEL__
-
-extern int notifier_chain_register(struct notifier_block **list, struct notifier_block *n);
-extern int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n);
-extern int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v);
-
-#define NOTIFY_DONE 0x0000 /* Don't care */
-#define NOTIFY_OK 0x0001 /* Suits me */
-#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
-#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
-/*
- * Clean way to return from the notifier and stop further calls.
- */
-#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
-
-/*
- * Declared notifiers so far. I can imagine quite a few more chains
- * over time (eg laptop power reset chains, reboot chain (to clean
- * device units up), device [un]mount chain, module load/unload chain,
- * low memory chain, screenblank chain (for plug in modular screenblankers)
- * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
- */
-
-/* netdevice notifier chain */
-#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
-#define NETDEV_DOWN 0x0002
-#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
- detected a hardware crash and restarted
- - we can use this eg to kick tcp sessions
- once done */
-#define NETDEV_CHANGE 0x0004 /* Notify device state change */
-#define NETDEV_REGISTER 0x0005
-#define NETDEV_UNREGISTER 0x0006
-#define NETDEV_CHANGEMTU 0x0007
-#define NETDEV_CHANGEADDR 0x0008
-#define NETDEV_GOING_DOWN 0x0009
-#define NETDEV_CHANGENAME 0x000A
-#define NETDEV_FEAT_CHANGE 0x000B
-
-#define SYS_DOWN 0x0001 /* Notify of system down */
-#define SYS_RESTART SYS_DOWN
-#define SYS_HALT 0x0002 /* Notify of system halt */
-#define SYS_POWER_OFF 0x0003 /* Notify of system power off */
-
-#define NETLINK_URELEASE 0x0001 /* Unicast netlink socket released */
-
-#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
-#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
-#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
-#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
-#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
-#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
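A usage sketch of the chain primitives above (a kernel-style fragment; the chain head and callback are hypothetical): a subsystem exports a chain head, interested parties hang a notifier_block on it with notifier_chain_register(), and the owner broadcasts events with notifier_call_chain(), which walks entries in descending priority until one returns a value carrying NOTIFY_STOP_MASK.

static struct notifier_block *demo_chain;   /* hypothetical chain head */

static int demo_event(struct notifier_block *self, unsigned long val, void *v)
{
    if (val == SYS_DOWN)
        return NOTIFY_OK;    /* handled; let later entries run too */
    return NOTIFY_DONE;      /* not interested */
}

static struct notifier_block demo_nb = {
    .notifier_call = demo_event,
    .priority      = 0,      /* higher priority runs earlier */
};

static void demo_register(void)
{
    notifier_chain_register(&demo_chain, &demo_nb);
}

static void demo_broadcast(void)
{
    notifier_call_chain(&demo_chain, SYS_DOWN, NULL);
}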
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_NOTIFIER_H */
diff --git a/xen/include/asm-ia64/linux/pci_ids.h b/xen/include/asm-ia64/linux/pci_ids.h
deleted file mode 100644
index fa4e1d7997..0000000000
--- a/xen/include/asm-ia64/linux/pci_ids.h
+++ /dev/null
@@ -1,2356 +0,0 @@
-/*
- * PCI Class, Vendor and Device IDs
- *
- * Please keep sorted.
- */
-
-/* Device classes and subclasses */
-
-#define PCI_CLASS_NOT_DEFINED 0x0000
-#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
-
-#define PCI_BASE_CLASS_STORAGE 0x01
-#define PCI_CLASS_STORAGE_SCSI 0x0100
-#define PCI_CLASS_STORAGE_IDE 0x0101
-#define PCI_CLASS_STORAGE_FLOPPY 0x0102
-#define PCI_CLASS_STORAGE_IPI 0x0103
-#define PCI_CLASS_STORAGE_RAID 0x0104
-#define PCI_CLASS_STORAGE_SAS 0x0107
-#define PCI_CLASS_STORAGE_OTHER 0x0180
-
-#define PCI_BASE_CLASS_NETWORK 0x02
-#define PCI_CLASS_NETWORK_ETHERNET 0x0200
-#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
-#define PCI_CLASS_NETWORK_FDDI 0x0202
-#define PCI_CLASS_NETWORK_ATM 0x0203
-#define PCI_CLASS_NETWORK_OTHER 0x0280
-
-#define PCI_BASE_CLASS_DISPLAY 0x03
-#define PCI_CLASS_DISPLAY_VGA 0x0300
-#define PCI_CLASS_DISPLAY_XGA 0x0301
-#define PCI_CLASS_DISPLAY_3D 0x0302
-#define PCI_CLASS_DISPLAY_OTHER 0x0380
-
-#define PCI_BASE_CLASS_MULTIMEDIA 0x04
-#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
-#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
-#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402
-#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
-
-#define PCI_BASE_CLASS_MEMORY 0x05
-#define PCI_CLASS_MEMORY_RAM 0x0500
-#define PCI_CLASS_MEMORY_FLASH 0x0501
-#define PCI_CLASS_MEMORY_OTHER 0x0580
-
-#define PCI_BASE_CLASS_BRIDGE 0x06
-#define PCI_CLASS_BRIDGE_HOST 0x0600
-#define PCI_CLASS_BRIDGE_ISA 0x0601
-#define PCI_CLASS_BRIDGE_EISA 0x0602
-#define PCI_CLASS_BRIDGE_MC 0x0603
-#define PCI_CLASS_BRIDGE_PCI 0x0604
-#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
-#define PCI_CLASS_BRIDGE_NUBUS 0x0606
-#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
-#define PCI_CLASS_BRIDGE_RACEWAY 0x0608
-#define PCI_CLASS_BRIDGE_OTHER 0x0680
-
-#define PCI_BASE_CLASS_COMMUNICATION 0x07
-#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
-#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
-#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702
-#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
-#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
-
-#define PCI_BASE_CLASS_SYSTEM 0x08
-#define PCI_CLASS_SYSTEM_PIC 0x0800
-#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
-#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020
-#define PCI_CLASS_SYSTEM_DMA 0x0801
-#define PCI_CLASS_SYSTEM_TIMER 0x0802
-#define PCI_CLASS_SYSTEM_RTC 0x0803
-#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804
-#define PCI_CLASS_SYSTEM_SDHCI 0x0805
-#define PCI_CLASS_SYSTEM_OTHER 0x0880
-
-#define PCI_BASE_CLASS_INPUT 0x09
-#define PCI_CLASS_INPUT_KEYBOARD 0x0900
-#define PCI_CLASS_INPUT_PEN 0x0901
-#define PCI_CLASS_INPUT_MOUSE 0x0902
-#define PCI_CLASS_INPUT_SCANNER 0x0903
-#define PCI_CLASS_INPUT_GAMEPORT 0x0904
-#define PCI_CLASS_INPUT_OTHER 0x0980
-
-#define PCI_BASE_CLASS_DOCKING 0x0a
-#define PCI_CLASS_DOCKING_GENERIC 0x0a00
-#define PCI_CLASS_DOCKING_OTHER 0x0a80
-
-#define PCI_BASE_CLASS_PROCESSOR 0x0b
-#define PCI_CLASS_PROCESSOR_386 0x0b00
-#define PCI_CLASS_PROCESSOR_486 0x0b01
-#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
-#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
-#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
-#define PCI_CLASS_PROCESSOR_MIPS 0x0b30
-#define PCI_CLASS_PROCESSOR_CO 0x0b40
-
-#define PCI_BASE_CLASS_SERIAL 0x0c
-#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
-#define PCI_CLASS_SERIAL_ACCESS 0x0c01
-#define PCI_CLASS_SERIAL_SSA 0x0c02
-#define PCI_CLASS_SERIAL_USB 0x0c03
-#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300
-#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
-#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
-#define PCI_CLASS_SERIAL_FIBER 0x0c04
-#define PCI_CLASS_SERIAL_SMBUS 0x0c05
-
-#define PCI_BASE_CLASS_INTELLIGENT 0x0e
-#define PCI_CLASS_INTELLIGENT_I2O 0x0e00
-
-#define PCI_BASE_CLASS_SATELLITE 0x0f
-#define PCI_CLASS_SATELLITE_TV 0x0f00
-#define PCI_CLASS_SATELLITE_AUDIO 0x0f01
-#define PCI_CLASS_SATELLITE_VOICE 0x0f03
-#define PCI_CLASS_SATELLITE_DATA 0x0f04
-
-#define PCI_BASE_CLASS_CRYPT 0x10
-#define PCI_CLASS_CRYPT_NETWORK 0x1000
-#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001
-#define PCI_CLASS_CRYPT_OTHER 0x1080
-
-#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11
-#define PCI_CLASS_SP_DPIO 0x1100
-#define PCI_CLASS_SP_OTHER 0x1180
-
-#define PCI_CLASS_OTHERS 0xff
-
-/* Vendors and devices. Sort key: vendor first, device next. */
-
-#define PCI_VENDOR_ID_DYNALINK 0x0675
-#define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702
-
-#define PCI_VENDOR_ID_BERKOM 0x0871
-#define PCI_DEVICE_ID_BERKOM_A1T 0xffa1
-#define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2
-#define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
-#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
-
-#define PCI_VENDOR_ID_COMPAQ 0x0e11
-#define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
-#define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
-#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10
-#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32
-#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34
-#define PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE 0xae33
-#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35
-#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40
-#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43
-#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011
-#define PCI_DEVICE_ID_COMPAQ_CISS 0xb060
-#define PCI_DEVICE_ID_COMPAQ_CISSB 0xb178
-#define PCI_DEVICE_ID_COMPAQ_CISSC 0x46
-#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130
-#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150
-
-#define PCI_VENDOR_ID_NCR 0x1000
-#define PCI_VENDOR_ID_LSI_LOGIC 0x1000
-#define PCI_DEVICE_ID_NCR_53C810 0x0001
-#define PCI_DEVICE_ID_NCR_53C820 0x0002
-#define PCI_DEVICE_ID_NCR_53C825 0x0003
-#define PCI_DEVICE_ID_NCR_53C815 0x0004
-#define PCI_DEVICE_ID_LSI_53C810AP 0x0005
-#define PCI_DEVICE_ID_NCR_53C860 0x0006
-#define PCI_DEVICE_ID_LSI_53C1510 0x000a
-#define PCI_DEVICE_ID_NCR_53C896 0x000b
-#define PCI_DEVICE_ID_NCR_53C895 0x000c
-#define PCI_DEVICE_ID_NCR_53C885 0x000d
-#define PCI_DEVICE_ID_NCR_53C875 0x000f
-#define PCI_DEVICE_ID_NCR_53C1510 0x0010
-#define PCI_DEVICE_ID_LSI_53C895A 0x0012
-#define PCI_DEVICE_ID_LSI_53C875A 0x0013
-#define PCI_DEVICE_ID_LSI_53C1010_33 0x0020
-#define PCI_DEVICE_ID_LSI_53C1010_66 0x0021
-#define PCI_DEVICE_ID_LSI_53C1030 0x0030
-#define PCI_DEVICE_ID_LSI_1030_53C1035 0x0032
-#define PCI_DEVICE_ID_LSI_53C1035 0x0040
-#define PCI_DEVICE_ID_NCR_53C875J 0x008f
-#define PCI_DEVICE_ID_LSI_FC909 0x0621
-#define PCI_DEVICE_ID_LSI_FC929 0x0622
-#define PCI_DEVICE_ID_LSI_FC929_LAN 0x0623
-#define PCI_DEVICE_ID_LSI_FC919 0x0624
-#define PCI_DEVICE_ID_LSI_FC919_LAN 0x0625
-#define PCI_DEVICE_ID_LSI_FC929X 0x0626
-#define PCI_DEVICE_ID_LSI_FC939X 0x0642
-#define PCI_DEVICE_ID_LSI_FC949X 0x0640
-#define PCI_DEVICE_ID_LSI_FC949ES 0x0646
-#define PCI_DEVICE_ID_LSI_FC919X 0x0628
-#define PCI_DEVICE_ID_NCR_YELLOWFIN 0x0701
-#define PCI_DEVICE_ID_LSI_61C102 0x0901
-#define PCI_DEVICE_ID_LSI_63C815 0x1000
-#define PCI_DEVICE_ID_LSI_SAS1064 0x0050
-#define PCI_DEVICE_ID_LSI_SAS1064R 0x0411
-#define PCI_DEVICE_ID_LSI_SAS1066 0x005E
-#define PCI_DEVICE_ID_LSI_SAS1068 0x0054
-#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C
-#define PCI_DEVICE_ID_LSI_SAS1064E 0x0056
-#define PCI_DEVICE_ID_LSI_SAS1066E 0x005A
-#define PCI_DEVICE_ID_LSI_SAS1068E 0x0058
-#define PCI_DEVICE_ID_LSI_SAS1078 0x0060
-
-#define PCI_VENDOR_ID_ATI 0x1002
-/* Mach64 */
-#define PCI_DEVICE_ID_ATI_68800 0x4158
-#define PCI_DEVICE_ID_ATI_215CT222 0x4354
-#define PCI_DEVICE_ID_ATI_210888CX 0x4358
-#define PCI_DEVICE_ID_ATI_215ET222 0x4554
-/* Mach64 / Rage */
-#define PCI_DEVICE_ID_ATI_215GB 0x4742
-#define PCI_DEVICE_ID_ATI_215GD 0x4744
-#define PCI_DEVICE_ID_ATI_215GI 0x4749
-#define PCI_DEVICE_ID_ATI_215GP 0x4750
-#define PCI_DEVICE_ID_ATI_215GQ 0x4751
-#define PCI_DEVICE_ID_ATI_215XL 0x4752
-#define PCI_DEVICE_ID_ATI_215GT 0x4754
-#define PCI_DEVICE_ID_ATI_215GTB 0x4755
-#define PCI_DEVICE_ID_ATI_215_IV 0x4756
-#define PCI_DEVICE_ID_ATI_215_IW 0x4757
-#define PCI_DEVICE_ID_ATI_215_IZ 0x475A
-#define PCI_DEVICE_ID_ATI_210888GX 0x4758
-#define PCI_DEVICE_ID_ATI_215_LB 0x4c42
-#define PCI_DEVICE_ID_ATI_215_LD 0x4c44
-#define PCI_DEVICE_ID_ATI_215_LG 0x4c47
-#define PCI_DEVICE_ID_ATI_215_LI 0x4c49
-#define PCI_DEVICE_ID_ATI_215_LM 0x4c4D
-#define PCI_DEVICE_ID_ATI_215_LN 0x4c4E
-#define PCI_DEVICE_ID_ATI_215_LR 0x4c52
-#define PCI_DEVICE_ID_ATI_215_LS 0x4c53
-#define PCI_DEVICE_ID_ATI_264_LT 0x4c54
-/* Mach64 VT */
-#define PCI_DEVICE_ID_ATI_264VT 0x5654
-#define PCI_DEVICE_ID_ATI_264VU 0x5655
-#define PCI_DEVICE_ID_ATI_264VV 0x5656
-/* Rage128 GL */
-#define PCI_DEVICE_ID_ATI_RAGE128_RE 0x5245
-#define PCI_DEVICE_ID_ATI_RAGE128_RF 0x5246
-#define PCI_DEVICE_ID_ATI_RAGE128_RG 0x5247
-/* Rage128 VR */
-#define PCI_DEVICE_ID_ATI_RAGE128_RK 0x524b
-#define PCI_DEVICE_ID_ATI_RAGE128_RL 0x524c
-#define PCI_DEVICE_ID_ATI_RAGE128_SE 0x5345
-#define PCI_DEVICE_ID_ATI_RAGE128_SF 0x5346
-#define PCI_DEVICE_ID_ATI_RAGE128_SG 0x5347
-#define PCI_DEVICE_ID_ATI_RAGE128_SH 0x5348
-#define PCI_DEVICE_ID_ATI_RAGE128_SK 0x534b
-#define PCI_DEVICE_ID_ATI_RAGE128_SL 0x534c
-#define PCI_DEVICE_ID_ATI_RAGE128_SM 0x534d
-#define PCI_DEVICE_ID_ATI_RAGE128_SN 0x534e
-/* Rage128 Ultra */
-#define PCI_DEVICE_ID_ATI_RAGE128_TF 0x5446
-#define PCI_DEVICE_ID_ATI_RAGE128_TL 0x544c
-#define PCI_DEVICE_ID_ATI_RAGE128_TR 0x5452
-#define PCI_DEVICE_ID_ATI_RAGE128_TS 0x5453
-#define PCI_DEVICE_ID_ATI_RAGE128_TT 0x5454
-#define PCI_DEVICE_ID_ATI_RAGE128_TU 0x5455
-/* Rage128 M3 */
-#define PCI_DEVICE_ID_ATI_RAGE128_LE 0x4c45
-#define PCI_DEVICE_ID_ATI_RAGE128_LF 0x4c46
-/* Rage128 M4 */
-#define PCI_DEVICE_ID_ATI_RAGE128_MF 0x4d46
-#define PCI_DEVICE_ID_ATI_RAGE128_ML 0x4d4c
-/* Rage128 Pro GL */
-#define PCI_DEVICE_ID_ATI_RAGE128_PA 0x5041
-#define PCI_DEVICE_ID_ATI_RAGE128_PB 0x5042
-#define PCI_DEVICE_ID_ATI_RAGE128_PC 0x5043
-#define PCI_DEVICE_ID_ATI_RAGE128_PD 0x5044
-#define PCI_DEVICE_ID_ATI_RAGE128_PE 0x5045
-#define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046
-/* Rage128 Pro VR */
-#define PCI_DEVICE_ID_ATI_RAGE128_PG 0x5047
-#define PCI_DEVICE_ID_ATI_RAGE128_PH 0x5048
-#define PCI_DEVICE_ID_ATI_RAGE128_PI 0x5049
-#define PCI_DEVICE_ID_ATI_RAGE128_PJ 0x504A
-#define PCI_DEVICE_ID_ATI_RAGE128_PK 0x504B
-#define PCI_DEVICE_ID_ATI_RAGE128_PL 0x504C
-#define PCI_DEVICE_ID_ATI_RAGE128_PM 0x504D
-#define PCI_DEVICE_ID_ATI_RAGE128_PN 0x504E
-#define PCI_DEVICE_ID_ATI_RAGE128_PO 0x504F
-#define PCI_DEVICE_ID_ATI_RAGE128_PP 0x5050
-#define PCI_DEVICE_ID_ATI_RAGE128_PQ 0x5051
-#define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052
-#define PCI_DEVICE_ID_ATI_RAGE128_PS 0x5053
-#define PCI_DEVICE_ID_ATI_RAGE128_PT 0x5054
-#define PCI_DEVICE_ID_ATI_RAGE128_PU 0x5055
-#define PCI_DEVICE_ID_ATI_RAGE128_PV 0x5056
-#define PCI_DEVICE_ID_ATI_RAGE128_PW 0x5057
-#define PCI_DEVICE_ID_ATI_RAGE128_PX 0x5058
-/* Rage128 M4 */
-/* Radeon R100 */
-#define PCI_DEVICE_ID_ATI_RADEON_QD 0x5144
-#define PCI_DEVICE_ID_ATI_RADEON_QE 0x5145
-#define PCI_DEVICE_ID_ATI_RADEON_QF 0x5146
-#define PCI_DEVICE_ID_ATI_RADEON_QG 0x5147
-/* Radeon RV100 (VE) */
-#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
-#define PCI_DEVICE_ID_ATI_RADEON_QZ 0x515a
-/* Radeon R200 (8500) */
-#define PCI_DEVICE_ID_ATI_RADEON_QL 0x514c
-#define PCI_DEVICE_ID_ATI_RADEON_QN 0x514e
-#define PCI_DEVICE_ID_ATI_RADEON_QO 0x514f
-#define PCI_DEVICE_ID_ATI_RADEON_Ql 0x516c
-#define PCI_DEVICE_ID_ATI_RADEON_BB 0x4242
-/* Radeon R200 (9100) */
-#define PCI_DEVICE_ID_ATI_RADEON_QM 0x514d
-/* Radeon RV200 (7500) */
-#define PCI_DEVICE_ID_ATI_RADEON_QW 0x5157
-#define PCI_DEVICE_ID_ATI_RADEON_QX 0x5158
-/* Radeon NV-100 */
-/* Radeon RV250 (9000) */
-#define PCI_DEVICE_ID_ATI_RADEON_Id 0x4964
-#define PCI_DEVICE_ID_ATI_RADEON_Ie 0x4965
-#define PCI_DEVICE_ID_ATI_RADEON_If 0x4966
-#define PCI_DEVICE_ID_ATI_RADEON_Ig 0x4967
-/* Radeon RV280 (9200) */
-#define PCI_DEVICE_ID_ATI_RADEON_Ya 0x5961
-#define PCI_DEVICE_ID_ATI_RADEON_Yd 0x5964
-/* Radeon R300 (9500) */
-/* Radeon R300 (9700) */
-#define PCI_DEVICE_ID_ATI_RADEON_ND 0x4e44
-#define PCI_DEVICE_ID_ATI_RADEON_NE 0x4e45
-#define PCI_DEVICE_ID_ATI_RADEON_NF 0x4e46
-#define PCI_DEVICE_ID_ATI_RADEON_NG 0x4e47
-/* Radeon R350 (9800) */
-/* Radeon RV350 (9600) */
-/* Radeon M6 */
-#define PCI_DEVICE_ID_ATI_RADEON_LY 0x4c59
-#define PCI_DEVICE_ID_ATI_RADEON_LZ 0x4c5a
-/* Radeon M7 */
-#define PCI_DEVICE_ID_ATI_RADEON_LW 0x4c57
-#define PCI_DEVICE_ID_ATI_RADEON_LX 0x4c58
-/* Radeon M9 */
-#define PCI_DEVICE_ID_ATI_RADEON_Ld 0x4c64
-#define PCI_DEVICE_ID_ATI_RADEON_Le 0x4c65
-#define PCI_DEVICE_ID_ATI_RADEON_Lf 0x4c66
-#define PCI_DEVICE_ID_ATI_RADEON_Lg 0x4c67
-/* Radeon */
-/* RadeonIGP */
-#define PCI_DEVICE_ID_ATI_RS100 0xcab0
-#define PCI_DEVICE_ID_ATI_RS200 0xcab2
-#define PCI_DEVICE_ID_ATI_RS200_B 0xcbb2
-#define PCI_DEVICE_ID_ATI_RS250 0xcab3
-#define PCI_DEVICE_ID_ATI_RS300_100 0x5830
-#define PCI_DEVICE_ID_ATI_RS300_133 0x5831
-#define PCI_DEVICE_ID_ATI_RS300_166 0x5832
-#define PCI_DEVICE_ID_ATI_RS300_200 0x5833
-#define PCI_DEVICE_ID_ATI_RS350_100 0x7830
-#define PCI_DEVICE_ID_ATI_RS350_133 0x7831
-#define PCI_DEVICE_ID_ATI_RS350_166 0x7832
-#define PCI_DEVICE_ID_ATI_RS350_200 0x7833
-#define PCI_DEVICE_ID_ATI_RS400_100 0x5a30
-#define PCI_DEVICE_ID_ATI_RS400_133 0x5a31
-#define PCI_DEVICE_ID_ATI_RS400_166 0x5a32
-#define PCI_DEVICE_ID_ATI_RS400_200 0x5a33
-#define PCI_DEVICE_ID_ATI_RS480 0x5950
-/* ATI IXP Chipset */
-#define PCI_DEVICE_ID_ATI_IXP200_IDE 0x4349
-#define PCI_DEVICE_ID_ATI_IXP200_SMBUS 0x4353
-#define PCI_DEVICE_ID_ATI_IXP300_SMBUS 0x4363
-#define PCI_DEVICE_ID_ATI_IXP300_IDE 0x4369
-#define PCI_DEVICE_ID_ATI_IXP300_SATA 0x436e
-#define PCI_DEVICE_ID_ATI_IXP400_SMBUS 0x4372
-#define PCI_DEVICE_ID_ATI_IXP400_IDE 0x4376
-#define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379
-#define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a
-#define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380
-#define PCI_DEVICE_ID_ATI_IXP600_SRAID 0x4381
-#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c
-
-#define PCI_VENDOR_ID_VLSI 0x1004
-#define PCI_DEVICE_ID_VLSI_82C592 0x0005
-#define PCI_DEVICE_ID_VLSI_82C593 0x0006
-#define PCI_DEVICE_ID_VLSI_82C594 0x0007
-#define PCI_DEVICE_ID_VLSI_82C597 0x0009
-#define PCI_DEVICE_ID_VLSI_82C541 0x000c
-#define PCI_DEVICE_ID_VLSI_82C543 0x000d
-#define PCI_DEVICE_ID_VLSI_82C532 0x0101
-#define PCI_DEVICE_ID_VLSI_82C534 0x0102
-#define PCI_DEVICE_ID_VLSI_82C535 0x0104
-#define PCI_DEVICE_ID_VLSI_82C147 0x0105
-#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
-
-#define PCI_VENDOR_ID_ADL 0x1005
-#define PCI_DEVICE_ID_ADL_2301 0x2301
-
-#define PCI_VENDOR_ID_NS 0x100b
-#define PCI_DEVICE_ID_NS_87415 0x0002
-#define PCI_DEVICE_ID_NS_87560_LIO 0x000e
-#define PCI_DEVICE_ID_NS_87560_USB 0x0012
-#define PCI_DEVICE_ID_NS_83815 0x0020
-#define PCI_DEVICE_ID_NS_83820 0x0022
-#define PCI_DEVICE_ID_NS_CS5535_ISA 0x002b
-#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
-#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e
-#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f
-#define PCI_DEVICE_ID_NS_CS5535_VIDEO 0x0030
-#define PCI_DEVICE_ID_NS_SATURN 0x0035
-#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500
-#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501
-#define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502
-#define PCI_DEVICE_ID_NS_SCx200_AUDIO 0x0503
-#define PCI_DEVICE_ID_NS_SCx200_VIDEO 0x0504
-#define PCI_DEVICE_ID_NS_SCx200_XBUS 0x0505
-#define PCI_DEVICE_ID_NS_SC1100_BRIDGE 0x0510
-#define PCI_DEVICE_ID_NS_SC1100_SMI 0x0511
-#define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515
-#define PCI_DEVICE_ID_NS_87410 0xd001
-
-#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE 0x0028
-#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE 0x002b
-
-#define PCI_VENDOR_ID_TSENG 0x100c
-#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
-#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
-#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206
-#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207
-#define PCI_DEVICE_ID_TSENG_ET6000 0x3208
-
-#define PCI_VENDOR_ID_WEITEK 0x100e
-#define PCI_DEVICE_ID_WEITEK_P9000 0x9001
-#define PCI_DEVICE_ID_WEITEK_P9100 0x9100
-
-#define PCI_VENDOR_ID_DEC 0x1011
-#define PCI_DEVICE_ID_DEC_BRD 0x0001
-#define PCI_DEVICE_ID_DEC_TULIP 0x0002
-#define PCI_DEVICE_ID_DEC_TGA 0x0004
-#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009
-#define PCI_DEVICE_ID_DEC_TGA2 0x000D
-#define PCI_DEVICE_ID_DEC_FDDI 0x000F
-#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014
-#define PCI_DEVICE_ID_DEC_21142 0x0019
-#define PCI_DEVICE_ID_DEC_21052 0x0021
-#define PCI_DEVICE_ID_DEC_21150 0x0022
-#define PCI_DEVICE_ID_DEC_21152 0x0024
-#define PCI_DEVICE_ID_DEC_21153 0x0025
-#define PCI_DEVICE_ID_DEC_21154 0x0026
-#define PCI_DEVICE_ID_DEC_21285 0x1065
-#define PCI_DEVICE_ID_COMPAQ_42XX 0x0046
-
-#define PCI_VENDOR_ID_CIRRUS 0x1013
-#define PCI_DEVICE_ID_CIRRUS_7548 0x0038
-#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0
-#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4
-#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8
-#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac
-#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8
-#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc
-#define PCI_DEVICE_ID_CIRRUS_5462 0x00d0
-#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4
-#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6
-#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
-#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
-#define PCI_DEVICE_ID_CIRRUS_7543 0x1202
-#define PCI_DEVICE_ID_CIRRUS_4610 0x6001
-#define PCI_DEVICE_ID_CIRRUS_4612 0x6003
-#define PCI_DEVICE_ID_CIRRUS_4615 0x6004
-
-#define PCI_VENDOR_ID_IBM 0x1014
-#define PCI_DEVICE_ID_IBM_TR 0x0018
-#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e
-#define PCI_DEVICE_ID_IBM_CPC710_PCI64 0x00fc
-#define PCI_DEVICE_ID_IBM_SNIPE 0x0180
-#define PCI_DEVICE_ID_IBM_CITRINE 0x028C
-#define PCI_DEVICE_ID_IBM_GEMSTONE 0xB166
-#define PCI_DEVICE_ID_IBM_OBSIDIAN 0x02BD
-#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1 0x0031
-#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219
-#define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A
-#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251
-#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252
-
-#define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */
-#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005
-
-#define PCI_VENDOR_ID_WD 0x101c
-#define PCI_DEVICE_ID_WD_90C 0xc24a
-
-#define PCI_VENDOR_ID_AMI 0x101e
-#define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960
-#define PCI_DEVICE_ID_AMI_MEGARAID 0x9010
-#define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060
-
-#define PCI_VENDOR_ID_AMD 0x1022
-#define PCI_DEVICE_ID_AMD_K8_NB 0x1100
-#define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103
-#define PCI_DEVICE_ID_AMD_LANCE 0x2000
-#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
-#define PCI_DEVICE_ID_AMD_SCSI 0x2020
-#define PCI_DEVICE_ID_AMD_SERENADE 0x36c0
-#define PCI_DEVICE_ID_AMD_FE_GATE_7006 0x7006
-#define PCI_DEVICE_ID_AMD_FE_GATE_7007 0x7007
-#define PCI_DEVICE_ID_AMD_FE_GATE_700C 0x700C
-#define PCI_DEVICE_ID_AMD_FE_GATE_700E 0x700E
-#define PCI_DEVICE_ID_AMD_COBRA_7401 0x7401
-#define PCI_DEVICE_ID_AMD_VIPER_7409 0x7409
-#define PCI_DEVICE_ID_AMD_VIPER_740B 0x740B
-#define PCI_DEVICE_ID_AMD_VIPER_7410 0x7410
-#define PCI_DEVICE_ID_AMD_VIPER_7411 0x7411
-#define PCI_DEVICE_ID_AMD_VIPER_7413 0x7413
-#define PCI_DEVICE_ID_AMD_VIPER_7440 0x7440
-#define PCI_DEVICE_ID_AMD_OPUS_7441 0x7441
-#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
-#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
-#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
-#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
-#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
-#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a
-#define PCI_DEVICE_ID_AMD_8111_SMBUS 0x746b
-#define PCI_DEVICE_ID_AMD_8111_AUDIO 0x746d
-#define PCI_DEVICE_ID_AMD_8151_0 0x7454
-#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
-#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
-#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
-#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
-#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
-#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093
-#define PCI_DEVICE_ID_AMD_CS5536_OHC 0x2094
-#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
-#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
-#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
-#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
-
-#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
-#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
-
-#define PCI_VENDOR_ID_TRIDENT 0x1023
-#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
-#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001
-#define PCI_DEVICE_ID_TRIDENT_9320 0x9320
-#define PCI_DEVICE_ID_TRIDENT_9388 0x9388
-#define PCI_DEVICE_ID_TRIDENT_9397 0x9397
-#define PCI_DEVICE_ID_TRIDENT_939A 0x939A
-#define PCI_DEVICE_ID_TRIDENT_9520 0x9520
-#define PCI_DEVICE_ID_TRIDENT_9525 0x9525
-#define PCI_DEVICE_ID_TRIDENT_9420 0x9420
-#define PCI_DEVICE_ID_TRIDENT_9440 0x9440
-#define PCI_DEVICE_ID_TRIDENT_9660 0x9660
-#define PCI_DEVICE_ID_TRIDENT_9750 0x9750
-#define PCI_DEVICE_ID_TRIDENT_9850 0x9850
-#define PCI_DEVICE_ID_TRIDENT_9880 0x9880
-#define PCI_DEVICE_ID_TRIDENT_8400 0x8400
-#define PCI_DEVICE_ID_TRIDENT_8420 0x8420
-#define PCI_DEVICE_ID_TRIDENT_8500 0x8500
-
-#define PCI_VENDOR_ID_AI 0x1025
-#define PCI_DEVICE_ID_AI_M1435 0x1435
-
-#define PCI_VENDOR_ID_DELL 0x1028
-#define PCI_DEVICE_ID_DELL_RACIII 0x0008
-#define PCI_DEVICE_ID_DELL_RAC4 0x0012
-#define PCI_DEVICE_ID_DELL_PERC5 0x0015
-
-#define PCI_VENDOR_ID_MATROX 0x102B
-#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
-#define PCI_DEVICE_ID_MATROX_MIL 0x0519
-#define PCI_DEVICE_ID_MATROX_MYS 0x051A
-#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b
-#define PCI_DEVICE_ID_MATROX_MYS_AGP 0x051e
-#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f
-#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10
-#define PCI_DEVICE_ID_MATROX_G100_MM 0x1000
-#define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001
-#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520
-#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521
-#define PCI_DEVICE_ID_MATROX_G400 0x0525
-#define PCI_DEVICE_ID_MATROX_G550 0x2527
-#define PCI_DEVICE_ID_MATROX_VIA 0x4536
-
-#define PCI_VENDOR_ID_CT 0x102c
-#define PCI_DEVICE_ID_CT_69000 0x00c0
-#define PCI_DEVICE_ID_CT_65545 0x00d8
-#define PCI_DEVICE_ID_CT_65548 0x00dc
-#define PCI_DEVICE_ID_CT_65550 0x00e0
-#define PCI_DEVICE_ID_CT_65554 0x00e4
-#define PCI_DEVICE_ID_CT_65555 0x00e5
-
-#define PCI_VENDOR_ID_MIRO 0x1031
-#define PCI_DEVICE_ID_MIRO_36050 0x5601
-#define PCI_DEVICE_ID_MIRO_DC10PLUS 0x7efe
-#define PCI_DEVICE_ID_MIRO_DC30PLUS 0xd801
-
-#define PCI_VENDOR_ID_NEC 0x1033
-#define PCI_DEVICE_ID_NEC_CBUS_1 0x0001 /* PCI-Cbus Bridge */
-#define PCI_DEVICE_ID_NEC_LOCAL 0x0002 /* Local Bridge */
-#define PCI_DEVICE_ID_NEC_ATM 0x0003 /* ATM LAN Controller */
-#define PCI_DEVICE_ID_NEC_R4000 0x0004 /* R4000 Bridge */
-#define PCI_DEVICE_ID_NEC_486 0x0005 /* 486 Like Peripheral Bus Bridge */
-#define PCI_DEVICE_ID_NEC_ACCEL_1 0x0006 /* Graphic Accelerator */
-#define PCI_DEVICE_ID_NEC_UXBUS 0x0007 /* UX-Bus Bridge */
-#define PCI_DEVICE_ID_NEC_ACCEL_2 0x0008 /* Graphic Accelerator */
-#define PCI_DEVICE_ID_NEC_GRAPH 0x0009 /* PCI-CoreGraph Bridge */
-#define PCI_DEVICE_ID_NEC_VL 0x0016 /* PCI-VL Bridge */
-#define PCI_DEVICE_ID_NEC_STARALPHA2 0x002c /* STAR ALPHA2 */
-#define PCI_DEVICE_ID_NEC_CBUS_2 0x002d /* PCI-Cbus Bridge */
-#define PCI_DEVICE_ID_NEC_USB 0x0035 /* PCI-USB Host */
-#define PCI_DEVICE_ID_NEC_CBUS_3 0x003b
-#define PCI_DEVICE_ID_NEC_NAPCCARD 0x003e
-#define PCI_DEVICE_ID_NEC_PCX2 0x0046 /* PowerVR */
-#define PCI_DEVICE_ID_NEC_NILE4 0x005a
-#define PCI_DEVICE_ID_NEC_VRC5476 0x009b
-#define PCI_DEVICE_ID_NEC_VRC4173 0x00a5
-#define PCI_DEVICE_ID_NEC_VRC5477_AC97 0x00a6
-#define PCI_DEVICE_ID_NEC_PC9821CS01 0x800c /* PC-9821-CS01 */
-#define PCI_DEVICE_ID_NEC_PC9821NRB06 0x800d /* PC-9821NR-B06 */
-
-#define PCI_VENDOR_ID_FD 0x1036
-#define PCI_DEVICE_ID_FD_36C70 0x0000
-
-#define PCI_VENDOR_ID_SI 0x1039
-#define PCI_DEVICE_ID_SI_5591_AGP 0x0001
-#define PCI_DEVICE_ID_SI_6202 0x0002
-#define PCI_DEVICE_ID_SI_503 0x0008
-#define PCI_DEVICE_ID_SI_ACPI 0x0009
-#define PCI_DEVICE_ID_SI_SMBUS 0x0016
-#define PCI_DEVICE_ID_SI_LPC 0x0018
-#define PCI_DEVICE_ID_SI_5597_VGA 0x0200
-#define PCI_DEVICE_ID_SI_6205 0x0205
-#define PCI_DEVICE_ID_SI_501 0x0406
-#define PCI_DEVICE_ID_SI_496 0x0496
-#define PCI_DEVICE_ID_SI_300 0x0300
-#define PCI_DEVICE_ID_SI_315H 0x0310
-#define PCI_DEVICE_ID_SI_315 0x0315
-#define PCI_DEVICE_ID_SI_315PRO 0x0325
-#define PCI_DEVICE_ID_SI_530 0x0530
-#define PCI_DEVICE_ID_SI_540 0x0540
-#define PCI_DEVICE_ID_SI_550 0x0550
-#define PCI_DEVICE_ID_SI_540_VGA 0x5300
-#define PCI_DEVICE_ID_SI_550_VGA 0x5315
-#define PCI_DEVICE_ID_SI_620 0x0620
-#define PCI_DEVICE_ID_SI_630 0x0630
-#define PCI_DEVICE_ID_SI_633 0x0633
-#define PCI_DEVICE_ID_SI_635 0x0635
-#define PCI_DEVICE_ID_SI_640 0x0640
-#define PCI_DEVICE_ID_SI_645 0x0645
-#define PCI_DEVICE_ID_SI_646 0x0646
-#define PCI_DEVICE_ID_SI_648 0x0648
-#define PCI_DEVICE_ID_SI_650 0x0650
-#define PCI_DEVICE_ID_SI_651 0x0651
-#define PCI_DEVICE_ID_SI_655 0x0655
-#define PCI_DEVICE_ID_SI_661 0x0661
-#define PCI_DEVICE_ID_SI_730 0x0730
-#define PCI_DEVICE_ID_SI_733 0x0733
-#define PCI_DEVICE_ID_SI_630_VGA 0x6300
-#define PCI_DEVICE_ID_SI_735 0x0735
-#define PCI_DEVICE_ID_SI_740 0x0740
-#define PCI_DEVICE_ID_SI_741 0x0741
-#define PCI_DEVICE_ID_SI_745 0x0745
-#define PCI_DEVICE_ID_SI_746 0x0746
-#define PCI_DEVICE_ID_SI_755 0x0755
-#define PCI_DEVICE_ID_SI_760 0x0760
-#define PCI_DEVICE_ID_SI_900 0x0900
-#define PCI_DEVICE_ID_SI_961 0x0961
-#define PCI_DEVICE_ID_SI_962 0x0962
-#define PCI_DEVICE_ID_SI_963 0x0963
-#define PCI_DEVICE_ID_SI_965 0x0965
-#define PCI_DEVICE_ID_SI_966 0x0966
-#define PCI_DEVICE_ID_SI_968 0x0968
-#define PCI_DEVICE_ID_SI_5511 0x5511
-#define PCI_DEVICE_ID_SI_5513 0x5513
-#define PCI_DEVICE_ID_SI_5517 0x5517
-#define PCI_DEVICE_ID_SI_5518 0x5518
-#define PCI_DEVICE_ID_SI_5571 0x5571
-#define PCI_DEVICE_ID_SI_5581 0x5581
-#define PCI_DEVICE_ID_SI_5582 0x5582
-#define PCI_DEVICE_ID_SI_5591 0x5591
-#define PCI_DEVICE_ID_SI_5596 0x5596
-#define PCI_DEVICE_ID_SI_5597 0x5597
-#define PCI_DEVICE_ID_SI_5598 0x5598
-#define PCI_DEVICE_ID_SI_5600 0x5600
-#define PCI_DEVICE_ID_SI_7012 0x7012
-#define PCI_DEVICE_ID_SI_7013 0x7013
-#define PCI_DEVICE_ID_SI_7016 0x7016
-#define PCI_DEVICE_ID_SI_7018 0x7018
-
-#define PCI_VENDOR_ID_HP 0x103c
-#define PCI_DEVICE_ID_HP_VISUALIZE_EG 0x1005
-#define PCI_DEVICE_ID_HP_VISUALIZE_FX6 0x1006
-#define PCI_DEVICE_ID_HP_VISUALIZE_FX4 0x1008
-#define PCI_DEVICE_ID_HP_VISUALIZE_FX2 0x100a
-#define PCI_DEVICE_ID_HP_TACHYON 0x1028
-#define PCI_DEVICE_ID_HP_TACHLITE 0x1029
-#define PCI_DEVICE_ID_HP_J2585A 0x1030
-#define PCI_DEVICE_ID_HP_J2585B 0x1031
-#define PCI_DEVICE_ID_HP_J2973A 0x1040
-#define PCI_DEVICE_ID_HP_J2970A 0x1042
-#define PCI_DEVICE_ID_HP_DIVA 0x1048
-#define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049
-#define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A
-#define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B
-#define PCI_DEVICE_ID_HP_REO_IOC 0x10f1
-#define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b
-#define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223
-#define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226
-#define PCI_DEVICE_ID_HP_DIVA_POWERBAR 0x1227
-#define PCI_DEVICE_ID_HP_ZX1_IOC 0x122a
-#define PCI_DEVICE_ID_HP_PCIX_LBA 0x122e
-#define PCI_DEVICE_ID_HP_SX1000_IOC 0x127c
-#define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282
-#define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290
-#define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301
-#define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a
-#define PCI_DEVICE_ID_HP_CISSA 0x3220
-#define PCI_DEVICE_ID_HP_CISSC 0x3230
-#define PCI_DEVICE_ID_HP_CISSD 0x3238
-#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031
-
-#define PCI_VENDOR_ID_PCTECH 0x1042
-#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000
-#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001
-#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020
-
-#define PCI_VENDOR_ID_ASUSTEK 0x1043
-#define PCI_DEVICE_ID_ASUSTEK_0675 0x0675
-
-#define PCI_VENDOR_ID_DPT 0x1044
-#define PCI_DEVICE_ID_DPT 0xa400
-
-#define PCI_VENDOR_ID_OPTI 0x1045
-#define PCI_DEVICE_ID_OPTI_82C558 0xc558
-#define PCI_DEVICE_ID_OPTI_82C621 0xc621
-#define PCI_DEVICE_ID_OPTI_82C700 0xc700
-#define PCI_DEVICE_ID_OPTI_82C825 0xd568
-
-#define PCI_VENDOR_ID_ELSA 0x1048
-#define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000
-#define PCI_DEVICE_ID_ELSA_QS3000 0x3000
-
-
-#define PCI_VENDOR_ID_BUSLOGIC 0x104B
-#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
-#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
-#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130
-
-#define PCI_VENDOR_ID_TI 0x104c
-#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
-#define PCI_DEVICE_ID_TI_4450 0x8011
-#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
-#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
-#define PCI_DEVICE_ID_TI_X515 0x8036
-#define PCI_DEVICE_ID_TI_XX12 0x8039
-#define PCI_DEVICE_ID_TI_1130 0xac12
-#define PCI_DEVICE_ID_TI_1031 0xac13
-#define PCI_DEVICE_ID_TI_1131 0xac15
-#define PCI_DEVICE_ID_TI_1250 0xac16
-#define PCI_DEVICE_ID_TI_1220 0xac17
-#define PCI_DEVICE_ID_TI_1221 0xac19
-#define PCI_DEVICE_ID_TI_1210 0xac1a
-#define PCI_DEVICE_ID_TI_1450 0xac1b
-#define PCI_DEVICE_ID_TI_1225 0xac1c
-#define PCI_DEVICE_ID_TI_1251A 0xac1d
-#define PCI_DEVICE_ID_TI_1211 0xac1e
-#define PCI_DEVICE_ID_TI_1251B 0xac1f
-#define PCI_DEVICE_ID_TI_4410 0xac41
-#define PCI_DEVICE_ID_TI_4451 0xac42
-#define PCI_DEVICE_ID_TI_4510 0xac44
-#define PCI_DEVICE_ID_TI_4520 0xac46
-#define PCI_DEVICE_ID_TI_7510 0xac47
-#define PCI_DEVICE_ID_TI_7610 0xac48
-#define PCI_DEVICE_ID_TI_7410 0xac49
-#define PCI_DEVICE_ID_TI_1410 0xac50
-#define PCI_DEVICE_ID_TI_1420 0xac51
-#define PCI_DEVICE_ID_TI_1451A 0xac52
-#define PCI_DEVICE_ID_TI_1620 0xac54
-#define PCI_DEVICE_ID_TI_1520 0xac55
-#define PCI_DEVICE_ID_TI_1510 0xac56
-#define PCI_DEVICE_ID_TI_X620 0xac8d
-#define PCI_DEVICE_ID_TI_X420 0xac8e
-
-#define PCI_VENDOR_ID_SONY 0x104d
-
-
-/* Winbond have two vendor IDs! See 0x10ad as well */
-#define PCI_VENDOR_ID_WINBOND2 0x1050
-#define PCI_DEVICE_ID_WINBOND2_89C940F 0x5a5a
-#define PCI_DEVICE_ID_WINBOND2_6692 0x6692
-
-#define PCI_VENDOR_ID_ANIGMA 0x1051
-#define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100
-
-#define PCI_VENDOR_ID_EFAR 0x1055
-#define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130
-#define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463
-
-#define PCI_VENDOR_ID_MOTOROLA 0x1057
-#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001
-#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002
-#define PCI_DEVICE_ID_MOTOROLA_MPC107 0x0004
-#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801
-#define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802
-#define PCI_DEVICE_ID_MOTOROLA_HAWK 0x4803
-#define PCI_DEVICE_ID_MOTOROLA_HARRIER 0x480b
-#define PCI_DEVICE_ID_MOTOROLA_MPC5200 0x5803
-#define PCI_DEVICE_ID_MOTOROLA_MPC5200B 0x5809
-
-#define PCI_VENDOR_ID_PROMISE 0x105a
-#define PCI_DEVICE_ID_PROMISE_20265 0x0d30
-#define PCI_DEVICE_ID_PROMISE_20267 0x4d30
-#define PCI_DEVICE_ID_PROMISE_20246 0x4d33
-#define PCI_DEVICE_ID_PROMISE_20262 0x4d38
-#define PCI_DEVICE_ID_PROMISE_20263 0x0D38
-#define PCI_DEVICE_ID_PROMISE_20268 0x4d68
-#define PCI_DEVICE_ID_PROMISE_20269 0x4d69
-#define PCI_DEVICE_ID_PROMISE_20270 0x6268
-#define PCI_DEVICE_ID_PROMISE_20271 0x6269
-#define PCI_DEVICE_ID_PROMISE_20275 0x1275
-#define PCI_DEVICE_ID_PROMISE_20276 0x5275
-#define PCI_DEVICE_ID_PROMISE_20277 0x7275
-
-
-#define PCI_VENDOR_ID_UMC 0x1060
-#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
-#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
-#define PCI_DEVICE_ID_UMC_UM8886A 0x886a
-
-
-#define PCI_VENDOR_ID_MYLEX 0x1069
-#define PCI_DEVICE_ID_MYLEX_DAC960_P 0x0001
-#define PCI_DEVICE_ID_MYLEX_DAC960_PD 0x0002
-#define PCI_DEVICE_ID_MYLEX_DAC960_PG 0x0010
-#define PCI_DEVICE_ID_MYLEX_DAC960_LA 0x0020
-#define PCI_DEVICE_ID_MYLEX_DAC960_LP 0x0050
-#define PCI_DEVICE_ID_MYLEX_DAC960_BA 0xBA56
-#define PCI_DEVICE_ID_MYLEX_DAC960_GEM 0xB166
-
-
-#define PCI_VENDOR_ID_APPLE 0x106b
-#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001
-#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e
-#define PCI_DEVICE_ID_APPLE_UNI_N_FW 0x0018
-#define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020
-#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021
-#define PCI_DEVICE_ID_APPLE_UNI_N_GMACP 0x0024
-#define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027
-#define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d
-#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e
-#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032
-#define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033
-#define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034
-#define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b
-#define PCI_DEVICE_ID_APPLE_K2_ATA100 0x0043
-#define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b
-#define PCI_DEVICE_ID_APPLE_K2_GMAC 0x004c
-#define PCI_DEVICE_ID_APPLE_SH_ATA 0x0050
-#define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051
-#define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058
-#define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059
-#define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066
-#define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069
-#define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a
-#define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b
-#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645
-
-#define PCI_VENDOR_ID_YAMAHA 0x1073
-#define PCI_DEVICE_ID_YAMAHA_724 0x0004
-#define PCI_DEVICE_ID_YAMAHA_724F 0x000d
-#define PCI_DEVICE_ID_YAMAHA_740 0x000a
-#define PCI_DEVICE_ID_YAMAHA_740C 0x000c
-#define PCI_DEVICE_ID_YAMAHA_744 0x0010
-#define PCI_DEVICE_ID_YAMAHA_754 0x0012
-
-
-#define PCI_VENDOR_ID_QLOGIC 0x1077
-#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016
-#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020
-#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080
-#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216
-#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240
-#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280
-#define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100
-#define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200
-#define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300
-#define PCI_DEVICE_ID_QLOGIC_ISP2312 0x2312
-#define PCI_DEVICE_ID_QLOGIC_ISP2322 0x2322
-#define PCI_DEVICE_ID_QLOGIC_ISP6312 0x6312
-#define PCI_DEVICE_ID_QLOGIC_ISP6322 0x6322
-#define PCI_DEVICE_ID_QLOGIC_ISP2422 0x2422
-#define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432
-#define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512
-#define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522
-#define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422
-#define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432
-
-#define PCI_VENDOR_ID_CYRIX 0x1078
-#define PCI_DEVICE_ID_CYRIX_5510 0x0000
-#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001
-#define PCI_DEVICE_ID_CYRIX_5520 0x0002
-#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100
-#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102
-#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103
-#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104
-
-
-
-#define PCI_VENDOR_ID_CONTAQ 0x1080
-#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693
-
-
-#define PCI_VENDOR_ID_OLICOM 0x108d
-#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
-#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
-#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
-
-#define PCI_VENDOR_ID_SUN 0x108e
-#define PCI_DEVICE_ID_SUN_EBUS 0x1000
-#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001
-#define PCI_DEVICE_ID_SUN_RIO_EBUS 0x1100
-#define PCI_DEVICE_ID_SUN_RIO_GEM 0x1101
-#define PCI_DEVICE_ID_SUN_RIO_1394 0x1102
-#define PCI_DEVICE_ID_SUN_RIO_USB 0x1103
-#define PCI_DEVICE_ID_SUN_GEM 0x2bad
-#define PCI_DEVICE_ID_SUN_SIMBA 0x5000
-#define PCI_DEVICE_ID_SUN_PBM 0x8000
-#define PCI_DEVICE_ID_SUN_SCHIZO 0x8001
-#define PCI_DEVICE_ID_SUN_SABRE 0xa000
-#define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001
-#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801
-#define PCI_DEVICE_ID_SUN_CASSINI 0xabba
-
-#define PCI_VENDOR_ID_CMD 0x1095
-#define PCI_DEVICE_ID_CMD_643 0x0643
-#define PCI_DEVICE_ID_CMD_646 0x0646
-#define PCI_DEVICE_ID_CMD_648 0x0648
-#define PCI_DEVICE_ID_CMD_649 0x0649
-
-#define PCI_DEVICE_ID_SII_680 0x0680
-#define PCI_DEVICE_ID_SII_3112 0x3112
-#define PCI_DEVICE_ID_SII_1210SA 0x0240
-
-
-#define PCI_VENDOR_ID_BROOKTREE 0x109e
-#define PCI_DEVICE_ID_BROOKTREE_878 0x0878
-#define PCI_DEVICE_ID_BROOKTREE_879 0x0879
-
-
-#define PCI_VENDOR_ID_SGI 0x10a9
-#define PCI_DEVICE_ID_SGI_IOC3 0x0003
-#define PCI_DEVICE_ID_SGI_IOC4 0x100a
-#define PCI_VENDOR_ID_SGI_LITHIUM 0x1002
-
-
-#define PCI_VENDOR_ID_WINBOND 0x10ad
-#define PCI_DEVICE_ID_WINBOND_82C105 0x0105
-#define PCI_DEVICE_ID_WINBOND_83C553 0x0565
-
-
-#define PCI_VENDOR_ID_PLX 0x10b5
-#define PCI_DEVICE_ID_PLX_R685 0x1030
-#define PCI_DEVICE_ID_PLX_ROMULUS 0x106a
-#define PCI_DEVICE_ID_PLX_SPCOM800 0x1076
-#define PCI_DEVICE_ID_PLX_1077 0x1077
-#define PCI_DEVICE_ID_PLX_SPCOM200 0x1103
-#define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151
-#define PCI_DEVICE_ID_PLX_R753 0x1152
-#define PCI_DEVICE_ID_PLX_OLITEC 0x1187
-#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196
-#define PCI_DEVICE_ID_PLX_9050 0x9050
-#define PCI_DEVICE_ID_PLX_9080 0x9080
-#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001
-
-#define PCI_VENDOR_ID_MADGE 0x10b6
-#define PCI_DEVICE_ID_MADGE_MK2 0x0002
-
-#define PCI_VENDOR_ID_3COM 0x10b7
-#define PCI_DEVICE_ID_3COM_3C985 0x0001
-#define PCI_DEVICE_ID_3COM_3C940 0x1700
-#define PCI_DEVICE_ID_3COM_3C339 0x3390
-#define PCI_DEVICE_ID_3COM_3C359 0x3590
-#define PCI_DEVICE_ID_3COM_3C940B 0x80eb
-#define PCI_DEVICE_ID_3COM_3CR990 0x9900
-#define PCI_DEVICE_ID_3COM_3CR990_TX_95 0x9902
-#define PCI_DEVICE_ID_3COM_3CR990_TX_97 0x9903
-#define PCI_DEVICE_ID_3COM_3CR990B 0x9904
-#define PCI_DEVICE_ID_3COM_3CR990_FX 0x9905
-#define PCI_DEVICE_ID_3COM_3CR990SVR95 0x9908
-#define PCI_DEVICE_ID_3COM_3CR990SVR97 0x9909
-#define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a
-
-
-#define PCI_VENDOR_ID_AL 0x10b9
-#define PCI_DEVICE_ID_AL_M1533 0x1533
-#define PCI_DEVICE_ID_AL_M1535 0x1535
-#define PCI_DEVICE_ID_AL_M1541 0x1541
-#define PCI_DEVICE_ID_AL_M1563 0x1563
-#define PCI_DEVICE_ID_AL_M1621 0x1621
-#define PCI_DEVICE_ID_AL_M1631 0x1631
-#define PCI_DEVICE_ID_AL_M1632 0x1632
-#define PCI_DEVICE_ID_AL_M1641 0x1641
-#define PCI_DEVICE_ID_AL_M1644 0x1644
-#define PCI_DEVICE_ID_AL_M1647 0x1647
-#define PCI_DEVICE_ID_AL_M1651 0x1651
-#define PCI_DEVICE_ID_AL_M1671 0x1671
-#define PCI_DEVICE_ID_AL_M1681 0x1681
-#define PCI_DEVICE_ID_AL_M1683 0x1683
-#define PCI_DEVICE_ID_AL_M1689 0x1689
-#define PCI_DEVICE_ID_AL_M5219 0x5219
-#define PCI_DEVICE_ID_AL_M5228 0x5228
-#define PCI_DEVICE_ID_AL_M5229 0x5229
-#define PCI_DEVICE_ID_AL_M5451 0x5451
-#define PCI_DEVICE_ID_AL_M7101 0x7101
-
-
-
-#define PCI_VENDOR_ID_NEOMAGIC 0x10c8
-#define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005
-#define PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO 0x8006
-#define PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO 0x8016
-
-
-#define PCI_VENDOR_ID_TCONRAD 0x10da
-#define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508
-
-
-#define PCI_VENDOR_ID_NVIDIA 0x10de
-#define PCI_DEVICE_ID_NVIDIA_TNT 0x0020
-#define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028
-#define PCI_DEVICE_ID_NVIDIA_UTNT2 0x0029
-#define PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN 0x002a
-#define PCI_DEVICE_ID_NVIDIA_VTNT2 0x002C
-#define PCI_DEVICE_ID_NVIDIA_UVTNT2 0x002D
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036
-#define PCI_DEVICE_ID_NVIDIA_NVENET_10 0x0037
-#define PCI_DEVICE_ID_NVIDIA_NVENET_11 0x0038
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 0x0041
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE 0x0042
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x0045
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000 0x004E
-#define PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS 0x0052
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055
-#define PCI_DEVICE_ID_NVIDIA_NVENET_8 0x0056
-#define PCI_DEVICE_ID_NVIDIA_NVENET_9 0x0057
-#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059
-#define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d
-#define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064
-#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065
-#define PCI_DEVICE_ID_NVIDIA_NVENET_2 0x0066
-#define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069
-#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a
-#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084
-#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085
-#define PCI_DEVICE_ID_NVIDIA_NVENET_4 0x0086
-#define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089
-#define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a
-#define PCI_DEVICE_ID_NVIDIA_NVENET_5 0x008c
-#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800 0x0098
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX 0x0099
-#define PCI_DEVICE_ID_NVIDIA_ITNT2 0x00A0
-#define PCI_DEVICE_ID_GEFORCE_6800A 0x00c1
-#define PCI_DEVICE_ID_GEFORCE_6800A_LE 0x00c2
-#define PCI_DEVICE_ID_GEFORCE_GO_6800 0x00c8
-#define PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA 0x00c9
-#define PCI_DEVICE_ID_QUADRO_FX_GO1400 0x00cc
-#define PCI_DEVICE_ID_QUADRO_FX_1400 0x00ce
-#define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1
-#define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4
-#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5
-#define PCI_DEVICE_ID_NVIDIA_NVENET_3 0x00d6
-#define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9
-#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da
-#define PCI_DEVICE_ID_NVIDIA_NVENET_7 0x00df
-#define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1
-#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3
-#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4
-#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5
-#define PCI_DEVICE_ID_NVIDIA_NVENET_6 0x00e6
-#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea
-#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee
-#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0
-#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1 0x00f1
-#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2
-#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3
-#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9
-#define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101
-#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX 0x0110
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2 0x0111
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO 0x0112
-#define PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR 0x0113
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT 0x0140
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600 0x0141
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL 0x0145
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540 0x014E
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200 0x014F
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS 0x0150
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2 0x0151
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA 0x0152
-#define PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO 0x0153
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE 0x0161
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200 0x0164
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250 0x0166
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1 0x0167
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1 0x0168
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460 0x0170
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440 0x0171
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420 0x0172
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE 0x0173
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO 0x0174
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO 0x0175
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32 0x0176
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO 0x0177
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL 0x0178
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64 0x0179
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_200 0x017A
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL 0x017B
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL 0x017C
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16 0x017D
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X 0x0181
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X 0x0182
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X 0x0183
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_4000 0x0185
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO 0x0186
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO 0x0187
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL 0x0188
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC 0x0189
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS 0x018A
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL 0x018B
-#define PCI_DEVICE_ID_NVIDIA_IGEFORCE2 0x01a0
-#define PCI_DEVICE_ID_NVIDIA_NFORCE 0x01a4
-#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc
-#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1
-#define PCI_DEVICE_ID_NVIDIA_NVENET_1 0x01c3
-#define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2 0x0202
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_DDC 0x0203
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B 0x0211
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE 0x0212
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT 0x0215
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600 0x0250
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400 0x0251
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200 0x0253
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL 0x0258
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL 0x0259
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL 0x025B
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS 0x0264
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE 0x0265
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS 0x0368
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F
-#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
-#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO 0x0286
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL 0x0288
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL 0x0289
-#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL 0x028C
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA 0x0301
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800 0x0302
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000 0x0308
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000 0x0309
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA 0x0311
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600 0x0312
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE 0x0314
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600 0x031A
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650 0x031B
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700 0x031C
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200 0x0320
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA 0x0321
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1 0x0322
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE 0x0323
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200 0x0324
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250 0x0325
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500 0x0326
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100 0x0327
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32 0x0328
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200 0x0329
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI 0x032A
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500 0x032B
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300 0x032C
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100 0x032D
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA 0x0330
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900 0x0331
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT 0x0332
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA 0x0333
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT 0x0334
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000 0x0338
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700 0x033F
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA 0x0341
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700 0x0342
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE 0x0343
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE 0x0344
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1 0x0347
-#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C
-#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E
-#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372
-#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
-#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
-#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC
-#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE
-#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448
-#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
-#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
-#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
-#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560
-
-#define PCI_VENDOR_ID_IMS 0x10e0
-#define PCI_DEVICE_ID_IMS_TT128 0x9128
-#define PCI_DEVICE_ID_IMS_TT3D 0x9135
-
-
-
-
-#define PCI_VENDOR_ID_INTERG 0x10ea
-#define PCI_DEVICE_ID_INTERG_1682 0x1682
-#define PCI_DEVICE_ID_INTERG_2000 0x2000
-#define PCI_DEVICE_ID_INTERG_2010 0x2010
-#define PCI_DEVICE_ID_INTERG_5000 0x5000
-#define PCI_DEVICE_ID_INTERG_5050 0x5050
-
-#define PCI_VENDOR_ID_REALTEK 0x10ec
-#define PCI_DEVICE_ID_REALTEK_8139 0x8139
-
-#define PCI_VENDOR_ID_XILINX 0x10ee
-#define PCI_DEVICE_ID_RME_DIGI96 0x3fc0
-#define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1
-#define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2
-#define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3
-#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5
-#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6
-
-
-#define PCI_VENDOR_ID_INIT 0x1101
-
-#define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */
-#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002
-
-#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */
-#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938
-
-#define PCI_VENDOR_ID_TTI 0x1103
-#define PCI_DEVICE_ID_TTI_HPT343 0x0003
-#define PCI_DEVICE_ID_TTI_HPT366 0x0004
-#define PCI_DEVICE_ID_TTI_HPT372 0x0005
-#define PCI_DEVICE_ID_TTI_HPT302 0x0006
-#define PCI_DEVICE_ID_TTI_HPT371 0x0007
-#define PCI_DEVICE_ID_TTI_HPT374 0x0008
-#define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? */
-
-#define PCI_VENDOR_ID_VIA 0x1106
-#define PCI_DEVICE_ID_VIA_8763_0 0x0198
-#define PCI_DEVICE_ID_VIA_8380_0 0x0204
-#define PCI_DEVICE_ID_VIA_3238_0 0x0238
-#define PCI_DEVICE_ID_VIA_PT880 0x0258
-#define PCI_DEVICE_ID_VIA_PT880ULTRA 0x0308
-#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259
-#define PCI_DEVICE_ID_VIA_3269_0 0x0269
-#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
-#define PCI_DEVICE_ID_VIA_3296_0 0x0296
-#define PCI_DEVICE_ID_VIA_8363_0 0x0305
-#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314
-#define PCI_DEVICE_ID_VIA_8371_0 0x0391
-#define PCI_DEVICE_ID_VIA_8501_0 0x0501
-#define PCI_DEVICE_ID_VIA_82C561 0x0561
-#define PCI_DEVICE_ID_VIA_82C586_1 0x0571
-#define PCI_DEVICE_ID_VIA_82C576 0x0576
-#define PCI_DEVICE_ID_VIA_SATA_EIDE 0x0581
-#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
-#define PCI_DEVICE_ID_VIA_82C596 0x0596
-#define PCI_DEVICE_ID_VIA_82C597_0 0x0597
-#define PCI_DEVICE_ID_VIA_82C598_0 0x0598
-#define PCI_DEVICE_ID_VIA_8601_0 0x0601
-#define PCI_DEVICE_ID_VIA_8605_0 0x0605
-#define PCI_DEVICE_ID_VIA_82C686 0x0686
-#define PCI_DEVICE_ID_VIA_82C691_0 0x0691
-#define PCI_DEVICE_ID_VIA_82C576_1 0x1571
-#define PCI_DEVICE_ID_VIA_82C586_2 0x3038
-#define PCI_DEVICE_ID_VIA_82C586_3 0x3040
-#define PCI_DEVICE_ID_VIA_82C596_3 0x3050
-#define PCI_DEVICE_ID_VIA_82C596B_3 0x3051
-#define PCI_DEVICE_ID_VIA_82C686_4 0x3057
-#define PCI_DEVICE_ID_VIA_82C686_5 0x3058
-#define PCI_DEVICE_ID_VIA_8233_5 0x3059
-#define PCI_DEVICE_ID_VIA_8233_0 0x3074
-#define PCI_DEVICE_ID_VIA_8633_0 0x3091
-#define PCI_DEVICE_ID_VIA_8367_0 0x3099
-#define PCI_DEVICE_ID_VIA_8653_0 0x3101
-#define PCI_DEVICE_ID_VIA_8622 0x3102
-#define PCI_DEVICE_ID_VIA_8235_USB_2 0x3104
-#define PCI_DEVICE_ID_VIA_8233C_0 0x3109
-#define PCI_DEVICE_ID_VIA_8361 0x3112
-#define PCI_DEVICE_ID_VIA_XM266 0x3116
-#define PCI_DEVICE_ID_VIA_612X 0x3119
-#define PCI_DEVICE_ID_VIA_862X_0 0x3123
-#define PCI_DEVICE_ID_VIA_8753_0 0x3128
-#define PCI_DEVICE_ID_VIA_8233A 0x3147
-#define PCI_DEVICE_ID_VIA_8703_51_0 0x3148
-#define PCI_DEVICE_ID_VIA_8237_SATA 0x3149
-#define PCI_DEVICE_ID_VIA_XN266 0x3156
-#define PCI_DEVICE_ID_VIA_6410 0x3164
-#define PCI_DEVICE_ID_VIA_8754C_0 0x3168
-#define PCI_DEVICE_ID_VIA_8235 0x3177
-#define PCI_DEVICE_ID_VIA_8385_0 0x3188
-#define PCI_DEVICE_ID_VIA_8377_0 0x3189
-#define PCI_DEVICE_ID_VIA_8378_0 0x3205
-#define PCI_DEVICE_ID_VIA_8783_0 0x3208
-#define PCI_DEVICE_ID_VIA_8237 0x3227
-#define PCI_DEVICE_ID_VIA_8251 0x3287
-#define PCI_DEVICE_ID_VIA_8237A 0x3337
-#define PCI_DEVICE_ID_VIA_8231 0x8231
-#define PCI_DEVICE_ID_VIA_8231_4 0x8235
-#define PCI_DEVICE_ID_VIA_8365_1 0x8305
-#define PCI_DEVICE_ID_VIA_CX700 0x8324
-#define PCI_DEVICE_ID_VIA_8371_1 0x8391
-#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
-#define PCI_DEVICE_ID_VIA_838X_1 0xB188
-#define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198
-
-#define PCI_VENDOR_ID_SIEMENS 0x110A
-#define PCI_DEVICE_ID_SIEMENS_DSCC4 0x2102
-
-
-#define PCI_VENDOR_ID_VORTEX 0x1119
-#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000
-#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001
-#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002
-#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003
-#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004
-#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005
-#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006
-#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007
-#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008
-#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009
-#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a
-#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b
-#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c
-#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d
-#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100
-#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101
-#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102
-#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103
-#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104
-#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105
-
-#define PCI_VENDOR_ID_EF 0x111a
-#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000
-#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002
-#define PCI_VENDOR_ID_EF_ATM_LANAI2 0x0003
-#define PCI_VENDOR_ID_EF_ATM_LANAIHB 0x0005
-
-#define PCI_VENDOR_ID_IDT 0x111d
-#define PCI_DEVICE_ID_IDT_IDT77201 0x0001
-
-#define PCI_VENDOR_ID_FORE 0x1127
-#define PCI_DEVICE_ID_FORE_PCA200E 0x0300
-
-
-#define PCI_VENDOR_ID_PHILIPS 0x1131
-#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146
-#define PCI_DEVICE_ID_PHILIPS_SAA9730 0x9730
-
-#define PCI_VENDOR_ID_EICON 0x1133
-#define PCI_DEVICE_ID_EICON_DIVA20 0xe002
-#define PCI_DEVICE_ID_EICON_DIVA20_U 0xe004
-#define PCI_DEVICE_ID_EICON_DIVA201 0xe005
-#define PCI_DEVICE_ID_EICON_DIVA202 0xe00b
-#define PCI_DEVICE_ID_EICON_MAESTRA 0xe010
-#define PCI_DEVICE_ID_EICON_MAESTRAQ 0xe012
-#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013
-#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014
-
-#define PCI_VENDOR_ID_ZIATECH 0x1138
-#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550
-
-
-
-#define PCI_VENDOR_ID_SYSKONNECT 0x1148
-#define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200
-#define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300
-#define PCI_DEVICE_ID_SYSKONNECT_YU 0x4320
-#define PCI_DEVICE_ID_SYSKONNECT_9DXX 0x4400
-#define PCI_DEVICE_ID_SYSKONNECT_9MXX 0x4500
-
-
-#define PCI_VENDOR_ID_DIGI 0x114f
-#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_E 0x0070
-#define PCI_DEVICE_ID_DIGI_DF_M_E 0x0071
-#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A 0x0072
-#define PCI_DEVICE_ID_DIGI_DF_M_A 0x0073
-#define PCI_DEVICE_ID_NEO_2DB9 0x00C8
-#define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9
-#define PCI_DEVICE_ID_NEO_2RJ45 0x00CA
-#define PCI_DEVICE_ID_NEO_2RJ45PRI 0x00CB
-
-
-#define PCI_VENDOR_ID_XIRCOM 0x115d
-#define PCI_DEVICE_ID_XIRCOM_RBM56G 0x0101
-#define PCI_DEVICE_ID_XIRCOM_X3201_MDM 0x0103
-
-
-#define PCI_VENDOR_ID_SERVERWORKS 0x1166
-#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008
-#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009
-#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017
-#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103
-#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132
-#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200
-#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201
-#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203
-#define PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205
-#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211
-#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212
-#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213
-#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214
-#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217
-#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
-
-#define PCI_VENDOR_ID_SBE 0x1176
-#define PCI_DEVICE_ID_SBE_WANXL100 0x0301
-#define PCI_DEVICE_ID_SBE_WANXL200 0x0302
-#define PCI_DEVICE_ID_SBE_WANXL400 0x0104
-
-#define PCI_VENDOR_ID_TOSHIBA 0x1179
-#define PCI_DEVICE_ID_TOSHIBA_PICCOLO 0x0102
-#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0103
-#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2 0x0105
-#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a
-#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f
-#define PCI_DEVICE_ID_TOSHIBA_TOPIC100 0x0617
-
-#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
-#define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030
-#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108
-#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3
-
-#define PCI_VENDOR_ID_RICOH 0x1180
-#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465
-#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466
-#define PCI_DEVICE_ID_RICOH_RL5C475 0x0475
-#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
-#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
-#define PCI_DEVICE_ID_RICOH_R5C822 0x0822
-
-#define PCI_VENDOR_ID_DLINK 0x1186
-#define PCI_DEVICE_ID_DLINK_DGE510T 0x4c00
-
-#define PCI_VENDOR_ID_ARTOP 0x1191
-#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005
-#define PCI_DEVICE_ID_ARTOP_ATP860 0x0006
-#define PCI_DEVICE_ID_ARTOP_ATP860R 0x0007
-#define PCI_DEVICE_ID_ARTOP_ATP865 0x0008
-#define PCI_DEVICE_ID_ARTOP_ATP865R 0x0009
-#define PCI_DEVICE_ID_ARTOP_AEC7610 0x8002
-#define PCI_DEVICE_ID_ARTOP_AEC7612UW 0x8010
-#define PCI_DEVICE_ID_ARTOP_AEC7612U 0x8020
-#define PCI_DEVICE_ID_ARTOP_AEC7612S 0x8030
-#define PCI_DEVICE_ID_ARTOP_AEC7612D 0x8040
-#define PCI_DEVICE_ID_ARTOP_AEC7612SUW 0x8050
-#define PCI_DEVICE_ID_ARTOP_8060 0x8060
-
-#define PCI_VENDOR_ID_ZEITNET 0x1193
-#define PCI_DEVICE_ID_ZEITNET_1221 0x0001
-#define PCI_DEVICE_ID_ZEITNET_1225 0x0002
-
-
-#define PCI_VENDOR_ID_FUJITSU_ME 0x119e
-#define PCI_DEVICE_ID_FUJITSU_FS155 0x0001
-#define PCI_DEVICE_ID_FUJITSU_FS50 0x0003
-
-#define PCI_SUBVENDOR_ID_KEYSPAN 0x11a9
-#define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334
-
-#define PCI_VENDOR_ID_MARVELL 0x11ab
-#define PCI_DEVICE_ID_MARVELL_GT64111 0x4146
-#define PCI_DEVICE_ID_MARVELL_GT64260 0x6430
-#define PCI_DEVICE_ID_MARVELL_MV64360 0x6460
-#define PCI_DEVICE_ID_MARVELL_MV64460 0x6480
-
-#define PCI_VENDOR_ID_V3 0x11b0
-#define PCI_DEVICE_ID_V3_V960 0x0001
-#define PCI_DEVICE_ID_V3_V351 0x0002
-
-
-#define PCI_VENDOR_ID_ATT 0x11c1
-#define PCI_DEVICE_ID_ATT_VENUS_MODEM 0x480
-
-
-#define PCI_VENDOR_ID_SPECIALIX 0x11cb
-#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000
-#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000
-#define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004
-
-
-#define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4
-#define PCI_DEVICE_ID_AD1889JS 0x1889
-
-
-#define PCI_DEVICE_ID_SEGA_BBA 0x1234
-
-#define PCI_VENDOR_ID_ZORAN 0x11de
-#define PCI_DEVICE_ID_ZORAN_36057 0x6057
-#define PCI_DEVICE_ID_ZORAN_36120 0x6120
-
-
-#define PCI_VENDOR_ID_COMPEX 0x11f6
-#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
-
-#define PCI_VENDOR_ID_RP 0x11fe
-#define PCI_DEVICE_ID_RP32INTF 0x0001
-#define PCI_DEVICE_ID_RP8INTF 0x0002
-#define PCI_DEVICE_ID_RP16INTF 0x0003
-#define PCI_DEVICE_ID_RP4QUAD 0x0004
-#define PCI_DEVICE_ID_RP8OCTA 0x0005
-#define PCI_DEVICE_ID_RP8J 0x0006
-#define PCI_DEVICE_ID_RP4J 0x0007
-#define PCI_DEVICE_ID_RP8SNI 0x0008
-#define PCI_DEVICE_ID_RP16SNI 0x0009
-#define PCI_DEVICE_ID_RPP4 0x000A
-#define PCI_DEVICE_ID_RPP8 0x000B
-#define PCI_DEVICE_ID_RP4M 0x000D
-#define PCI_DEVICE_ID_RP2_232 0x000E
-#define PCI_DEVICE_ID_RP2_422 0x000F
-#define PCI_DEVICE_ID_URP32INTF 0x0801
-#define PCI_DEVICE_ID_URP8INTF 0x0802
-#define PCI_DEVICE_ID_URP16INTF 0x0803
-#define PCI_DEVICE_ID_URP8OCTA 0x0805
-#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C
-#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D
-#define PCI_DEVICE_ID_CRP16INTF 0x0903
-
-#define PCI_VENDOR_ID_CYCLADES 0x120e
-#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
-#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
-#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102
-#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103
-#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104
-#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105
-#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
-#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
-#define PCI_DEVICE_ID_PC300_RX_2 0x0300
-#define PCI_DEVICE_ID_PC300_RX_1 0x0301
-#define PCI_DEVICE_ID_PC300_TE_2 0x0310
-#define PCI_DEVICE_ID_PC300_TE_1 0x0311
-#define PCI_DEVICE_ID_PC300_TE_M_2 0x0320
-#define PCI_DEVICE_ID_PC300_TE_M_1 0x0321
-
-#define PCI_VENDOR_ID_ESSENTIAL 0x120f
-#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001
-
-#define PCI_VENDOR_ID_O2 0x1217
-#define PCI_DEVICE_ID_O2_6729 0x6729
-#define PCI_DEVICE_ID_O2_6730 0x673a
-#define PCI_DEVICE_ID_O2_6832 0x6832
-#define PCI_DEVICE_ID_O2_6836 0x6836
-
-#define PCI_VENDOR_ID_3DFX 0x121a
-#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
-#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002
-#define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003
-#define PCI_DEVICE_ID_3DFX_VOODOO3 0x0005
-#define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009
-
-
-
-#define PCI_VENDOR_ID_AVM 0x1244
-#define PCI_DEVICE_ID_AVM_B1 0x0700
-#define PCI_DEVICE_ID_AVM_C4 0x0800
-#define PCI_DEVICE_ID_AVM_A1 0x0a00
-#define PCI_DEVICE_ID_AVM_A1_V2 0x0e00
-#define PCI_DEVICE_ID_AVM_C2 0x1100
-#define PCI_DEVICE_ID_AVM_T1 0x1200
-
-
-#define PCI_VENDOR_ID_STALLION 0x124d
-
-/* Allied Telesyn */
-#define PCI_VENDOR_ID_AT 0x1259
-#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701
-#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703
-
-#define PCI_VENDOR_ID_ESS 0x125d
-#define PCI_DEVICE_ID_ESS_ESS1968 0x1968
-#define PCI_DEVICE_ID_ESS_ESS1978 0x1978
-#define PCI_DEVICE_ID_ESS_ALLEGRO_1 0x1988
-#define PCI_DEVICE_ID_ESS_ALLEGRO 0x1989
-#define PCI_DEVICE_ID_ESS_CANYON3D_2LE 0x1990
-#define PCI_DEVICE_ID_ESS_CANYON3D_2 0x1992
-#define PCI_DEVICE_ID_ESS_MAESTRO3 0x1998
-#define PCI_DEVICE_ID_ESS_MAESTRO3_1 0x1999
-#define PCI_DEVICE_ID_ESS_MAESTRO3_HW 0x199a
-#define PCI_DEVICE_ID_ESS_MAESTRO3_2 0x199b
-
-#define PCI_VENDOR_ID_SATSAGEM 0x1267
-#define PCI_DEVICE_ID_SATSAGEM_NICCY 0x1016
-
-
-#define PCI_VENDOR_ID_ENSONIQ 0x1274
-#define PCI_DEVICE_ID_ENSONIQ_CT5880 0x5880
-#define PCI_DEVICE_ID_ENSONIQ_ES1370 0x5000
-#define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371
-
-#define PCI_VENDOR_ID_TRANSMETA 0x1279
-#define PCI_DEVICE_ID_EFFICEON 0x0060
-
-#define PCI_VENDOR_ID_ROCKWELL 0x127A
-
-#define PCI_VENDOR_ID_ITE 0x1283
-#define PCI_DEVICE_ID_ITE_8211 0x8211
-#define PCI_DEVICE_ID_ITE_8212 0x8212
-#define PCI_DEVICE_ID_ITE_8872 0x8872
-#define PCI_DEVICE_ID_ITE_IT8330G_0 0xe886
-
-/* formerly Platform Tech */
-#define PCI_DEVICE_ID_ESS_ESS0100 0x0100
-
-#define PCI_VENDOR_ID_ALTEON 0x12ae
-
-
-#define PCI_SUBVENDOR_ID_CONNECT_TECH 0x12c4
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232 0x0001
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232 0x0002
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232 0x0003
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485 0x0004
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4 0x0005
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485 0x0006
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2 0x0007
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485 0x0008
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6 0x0009
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1 0x000A
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1 0x000B
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ 0x000C
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_PTM 0x000D
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_NT960PCI 0x0100
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2 0x0201
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4 0x0202
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232 0x0300
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232 0x0301
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232 0x0302
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1 0x0310
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2 0x0311
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4 0x0312
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2 0x0320
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4 0x0321
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8 0x0322
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485 0x0330
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485 0x0331
-#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485 0x0332
-
-
-#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
-#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
-
-#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0
-#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031
-#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021
-#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16 0x0011
-#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC 0x0041
-#define PCI_SUBVENDOR_ID_CHASE_PCIRAS 0x124D
-#define PCI_SUBDEVICE_ID_CHASE_PCIRAS4 0xF001
-#define PCI_SUBDEVICE_ID_CHASE_PCIRAS8 0xF010
-
-#define PCI_VENDOR_ID_AUREAL 0x12eb
-#define PCI_DEVICE_ID_AUREAL_VORTEX_1 0x0001
-#define PCI_DEVICE_ID_AUREAL_VORTEX_2 0x0002
-#define PCI_DEVICE_ID_AUREAL_ADVANTAGE 0x0003
-
-#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8
-#define PCI_DEVICE_ID_LML_33R10 0x8a02
-
-
-#define PCI_VENDOR_ID_SIIG 0x131f
-#define PCI_SUBVENDOR_ID_SIIG 0x131f
-#define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000
-#define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001
-#define PCI_DEVICE_ID_SIIG_1S_10x_850 0x1002
-#define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010
-#define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011
-#define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012
-#define PCI_DEVICE_ID_SIIG_1P_10x 0x1020
-#define PCI_DEVICE_ID_SIIG_2P_10x 0x1021
-#define PCI_DEVICE_ID_SIIG_2S_10x_550 0x1030
-#define PCI_DEVICE_ID_SIIG_2S_10x_650 0x1031
-#define PCI_DEVICE_ID_SIIG_2S_10x_850 0x1032
-#define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034
-#define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035
-#define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036
-#define PCI_DEVICE_ID_SIIG_4S_10x_550 0x1050
-#define PCI_DEVICE_ID_SIIG_4S_10x_650 0x1051
-#define PCI_DEVICE_ID_SIIG_4S_10x_850 0x1052
-#define PCI_DEVICE_ID_SIIG_1S_20x_550 0x2000
-#define PCI_DEVICE_ID_SIIG_1S_20x_650 0x2001
-#define PCI_DEVICE_ID_SIIG_1S_20x_850 0x2002
-#define PCI_DEVICE_ID_SIIG_1P_20x 0x2020
-#define PCI_DEVICE_ID_SIIG_2P_20x 0x2021
-#define PCI_DEVICE_ID_SIIG_2S_20x_550 0x2030
-#define PCI_DEVICE_ID_SIIG_2S_20x_650 0x2031
-#define PCI_DEVICE_ID_SIIG_2S_20x_850 0x2032
-#define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040
-#define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041
-#define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042
-#define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010
-#define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011
-#define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012
-#define PCI_DEVICE_ID_SIIG_4S_20x_550 0x2050
-#define PCI_DEVICE_ID_SIIG_4S_20x_650 0x2051
-#define PCI_DEVICE_ID_SIIG_4S_20x_850 0x2052
-#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060
-#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061
-#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062
-#define PCI_DEVICE_ID_SIIG_8S_20x_550 0x2080
-#define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081
-#define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082
-#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050
-
-#define PCI_VENDOR_ID_RADISYS 0x1331
-
-#define PCI_VENDOR_ID_DOMEX 0x134a
-#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001
-
-#define PCI_VENDOR_ID_INTASHIELD 0x135a
-#define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80
-
-#define PCI_VENDOR_ID_QUATECH 0x135C
-#define PCI_DEVICE_ID_QUATECH_QSC100 0x0010
-#define PCI_DEVICE_ID_QUATECH_DSC100 0x0020
-#define PCI_DEVICE_ID_QUATECH_ESC100D 0x0050
-#define PCI_DEVICE_ID_QUATECH_ESC100M 0x0060
-
-#define PCI_VENDOR_ID_SEALEVEL 0x135e
-#define PCI_DEVICE_ID_SEALEVEL_U530 0x7101
-#define PCI_DEVICE_ID_SEALEVEL_UCOMM2 0x7201
-#define PCI_DEVICE_ID_SEALEVEL_UCOMM422 0x7402
-#define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202
-#define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401
-#define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801
-#define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804
-
-#define PCI_VENDOR_ID_HYPERCOPE 0x1365
-#define PCI_DEVICE_ID_HYPERCOPE_PLX 0x9050
-#define PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO 0x0104
-#define PCI_SUBDEVICE_ID_HYPERCOPE_ERGO 0x0106
-#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107
-#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108
-
-#define PCI_VENDOR_ID_KAWASAKI 0x136b
-#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
-
-#define PCI_VENDOR_ID_CNET 0x1371
-#define PCI_DEVICE_ID_CNET_GIGACARD 0x434e
-
-#define PCI_VENDOR_ID_LMC 0x1376
-#define PCI_DEVICE_ID_LMC_HSSI 0x0003
-#define PCI_DEVICE_ID_LMC_DS3 0x0004
-#define PCI_DEVICE_ID_LMC_SSI 0x0005
-#define PCI_DEVICE_ID_LMC_T1 0x0006
-
-
-#define PCI_VENDOR_ID_NETGEAR 0x1385
-#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
-
-#define PCI_VENDOR_ID_APPLICOM 0x1389
-#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001
-#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002
-#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
-
-#define PCI_VENDOR_ID_MOXA 0x1393
-#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
-#define PCI_DEVICE_ID_MOXA_CP102 0x1020
-#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
-#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
-#define PCI_DEVICE_ID_MOXA_C104 0x1040
-#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
-#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
-#define PCI_DEVICE_ID_MOXA_CT114 0x1140
-#define PCI_DEVICE_ID_MOXA_CP114 0x1141
-#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
-#define PCI_DEVICE_ID_MOXA_CP132 0x1320
-#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
-#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
-#define PCI_DEVICE_ID_MOXA_C168 0x1680
-#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
-
-#define PCI_VENDOR_ID_CCD 0x1397
-#define PCI_DEVICE_ID_CCD_2BD0 0x2bd0
-#define PCI_DEVICE_ID_CCD_B000 0xb000
-#define PCI_DEVICE_ID_CCD_B006 0xb006
-#define PCI_DEVICE_ID_CCD_B007 0xb007
-#define PCI_DEVICE_ID_CCD_B008 0xb008
-#define PCI_DEVICE_ID_CCD_B009 0xb009
-#define PCI_DEVICE_ID_CCD_B00A 0xb00a
-#define PCI_DEVICE_ID_CCD_B00B 0xb00b
-#define PCI_DEVICE_ID_CCD_B00C 0xb00c
-#define PCI_DEVICE_ID_CCD_B100 0xb100
-#define PCI_DEVICE_ID_CCD_B700 0xb700
-#define PCI_DEVICE_ID_CCD_B701 0xb701
-
-#define PCI_VENDOR_ID_EXAR 0x13a8
-#define PCI_DEVICE_ID_EXAR_XR17C152 0x0152
-#define PCI_DEVICE_ID_EXAR_XR17C154 0x0154
-#define PCI_DEVICE_ID_EXAR_XR17C158 0x0158
-
-#define PCI_VENDOR_ID_MICROGATE 0x13c0
-#define PCI_DEVICE_ID_MICROGATE_USC 0x0010
-#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030
-
-#define PCI_VENDOR_ID_3WARE 0x13C1
-#define PCI_DEVICE_ID_3WARE_1000 0x1000
-#define PCI_DEVICE_ID_3WARE_7000 0x1001
-#define PCI_DEVICE_ID_3WARE_9000 0x1002
-
-#define PCI_VENDOR_ID_IOMEGA 0x13ca
-#define PCI_DEVICE_ID_IOMEGA_BUZ 0x4231
-
-#define PCI_VENDOR_ID_ABOCOM 0x13D1
-#define PCI_DEVICE_ID_ABOCOM_2BD1 0x2BD1
-
-#define PCI_VENDOR_ID_CMEDIA 0x13f6
-#define PCI_DEVICE_ID_CMEDIA_CM8338A 0x0100
-#define PCI_DEVICE_ID_CMEDIA_CM8338B 0x0101
-#define PCI_DEVICE_ID_CMEDIA_CM8738 0x0111
-#define PCI_DEVICE_ID_CMEDIA_CM8738B 0x0112
-
-#define PCI_VENDOR_ID_LAVA 0x1407
-#define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */
-#define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */
-#define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */
-#define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */
-#define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */
-#define PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */
-#define PCI_DEVICE_ID_LAVA_QUAD_A 0x0201 /* 2x 16650, half of 4 port */
-#define PCI_DEVICE_ID_LAVA_QUAD_B 0x0202 /* 2x 16650, half of 4 port */
-#define PCI_DEVICE_ID_LAVA_SSERIAL 0x0500 /* 1x 16550 */
-#define PCI_DEVICE_ID_LAVA_PORT_650 0x0600 /* 1x 16650 */
-#define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000
-#define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8002 /* The Lava Dual Parallel is */
-#define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8003 /* two PCI devices on a card */
-#define PCI_DEVICE_ID_LAVA_BOCA_IOPPAR 0x8800
-
-#define PCI_VENDOR_ID_TIMEDIA 0x1409
-#define PCI_DEVICE_ID_TIMEDIA_1889 0x7168
-
-#define PCI_VENDOR_ID_ICE 0x1412
-#define PCI_DEVICE_ID_ICE_1712 0x1712
-#define PCI_DEVICE_ID_VT1724 0x1724
-
-#define PCI_VENDOR_ID_OXSEMI 0x1415
-#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
-#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
-#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
-#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
-#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521
-
-#define PCI_VENDOR_ID_SAMSUNG 0x144d
-
-#define PCI_VENDOR_ID_MYRICOM 0x14c1
-
-#define PCI_VENDOR_ID_TITAN 0x14D2
-#define PCI_DEVICE_ID_TITAN_010L 0x8001
-#define PCI_DEVICE_ID_TITAN_100L 0x8010
-#define PCI_DEVICE_ID_TITAN_110L 0x8011
-#define PCI_DEVICE_ID_TITAN_200L 0x8020
-#define PCI_DEVICE_ID_TITAN_210L 0x8021
-#define PCI_DEVICE_ID_TITAN_400L 0x8040
-#define PCI_DEVICE_ID_TITAN_800L 0x8080
-#define PCI_DEVICE_ID_TITAN_100 0xA001
-#define PCI_DEVICE_ID_TITAN_200 0xA005
-#define PCI_DEVICE_ID_TITAN_400 0xA003
-#define PCI_DEVICE_ID_TITAN_800B 0xA004
-
-#define PCI_VENDOR_ID_PANACOM 0x14d4
-#define PCI_DEVICE_ID_PANACOM_QUADMODEM 0x0400
-#define PCI_DEVICE_ID_PANACOM_DUALMODEM 0x0402
-
-#define PCI_VENDOR_ID_SIPACKETS 0x14d9
-#define PCI_DEVICE_ID_SP1011 0x0010
-
-#define PCI_VENDOR_ID_AFAVLAB 0x14db
-#define PCI_DEVICE_ID_AFAVLAB_P028 0x2180
-#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182
-#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150
-
-#define PCI_VENDOR_ID_BROADCOM 0x14e4
-#define PCI_DEVICE_ID_TIGON3_5752 0x1600
-#define PCI_DEVICE_ID_TIGON3_5752M 0x1601
-#define PCI_DEVICE_ID_TIGON3_5700 0x1644
-#define PCI_DEVICE_ID_TIGON3_5701 0x1645
-#define PCI_DEVICE_ID_TIGON3_5702 0x1646
-#define PCI_DEVICE_ID_TIGON3_5703 0x1647
-#define PCI_DEVICE_ID_TIGON3_5704 0x1648
-#define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649
-#define PCI_DEVICE_ID_NX2_5706 0x164a
-#define PCI_DEVICE_ID_NX2_5708 0x164c
-#define PCI_DEVICE_ID_TIGON3_5702FE 0x164d
-#define PCI_DEVICE_ID_TIGON3_5705 0x1653
-#define PCI_DEVICE_ID_TIGON3_5705_2 0x1654
-#define PCI_DEVICE_ID_TIGON3_5720 0x1658
-#define PCI_DEVICE_ID_TIGON3_5721 0x1659
-#define PCI_DEVICE_ID_TIGON3_5722 0x165a
-#define PCI_DEVICE_ID_TIGON3_5705M 0x165d
-#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
-#define PCI_DEVICE_ID_TIGON3_5714 0x1668
-#define PCI_DEVICE_ID_TIGON3_5714S 0x1669
-#define PCI_DEVICE_ID_TIGON3_5780 0x166a
-#define PCI_DEVICE_ID_TIGON3_5780S 0x166b
-#define PCI_DEVICE_ID_TIGON3_5705F 0x166e
-#define PCI_DEVICE_ID_TIGON3_5754M 0x1672
-#define PCI_DEVICE_ID_TIGON3_5755M 0x1673
-#define PCI_DEVICE_ID_TIGON3_5756 0x1674
-#define PCI_DEVICE_ID_TIGON3_5750 0x1676
-#define PCI_DEVICE_ID_TIGON3_5751 0x1677
-#define PCI_DEVICE_ID_TIGON3_5715 0x1678
-#define PCI_DEVICE_ID_TIGON3_5715S 0x1679
-#define PCI_DEVICE_ID_TIGON3_5754 0x167a
-#define PCI_DEVICE_ID_TIGON3_5755 0x167b
-#define PCI_DEVICE_ID_TIGON3_5750M 0x167c
-#define PCI_DEVICE_ID_TIGON3_5751M 0x167d
-#define PCI_DEVICE_ID_TIGON3_5751F 0x167e
-#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
-#define PCI_DEVICE_ID_TIGON3_5782 0x1696
-#define PCI_DEVICE_ID_TIGON3_5786 0x169a
-#define PCI_DEVICE_ID_TIGON3_5787 0x169b
-#define PCI_DEVICE_ID_TIGON3_5788 0x169c
-#define PCI_DEVICE_ID_TIGON3_5789 0x169d
-#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6
-#define PCI_DEVICE_ID_TIGON3_5703X 0x16a7
-#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
-#define PCI_DEVICE_ID_NX2_5706S 0x16aa
-#define PCI_DEVICE_ID_NX2_5708S 0x16ac
-#define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6
-#define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7
-#define PCI_DEVICE_ID_TIGON3_5781 0x16dd
-#define PCI_DEVICE_ID_TIGON3_5753 0x16f7
-#define PCI_DEVICE_ID_TIGON3_5753M 0x16fd
-#define PCI_DEVICE_ID_TIGON3_5753F 0x16fe
-#define PCI_DEVICE_ID_TIGON3_5901 0x170d
-#define PCI_DEVICE_ID_BCM4401B1 0x170c
-#define PCI_DEVICE_ID_TIGON3_5901_2 0x170e
-#define PCI_DEVICE_ID_TIGON3_5906 0x1712
-#define PCI_DEVICE_ID_TIGON3_5906M 0x1713
-#define PCI_DEVICE_ID_BCM4401 0x4401
-#define PCI_DEVICE_ID_BCM4401B0 0x4402
-
-#define PCI_VENDOR_ID_TOPIC 0x151f
-#define PCI_DEVICE_ID_TOPIC_TP560 0x0000
-
-#define PCI_VENDOR_ID_ENE 0x1524
-#define PCI_DEVICE_ID_ENE_1211 0x1211
-#define PCI_DEVICE_ID_ENE_1225 0x1225
-#define PCI_DEVICE_ID_ENE_1410 0x1410
-#define PCI_DEVICE_ID_ENE_710 0x1411
-#define PCI_DEVICE_ID_ENE_712 0x1412
-#define PCI_DEVICE_ID_ENE_1420 0x1420
-#define PCI_DEVICE_ID_ENE_720 0x1421
-#define PCI_DEVICE_ID_ENE_722 0x1422
-
-#define PCI_VENDOR_ID_CHELSIO 0x1425
-
-
-#define PCI_VENDOR_ID_SYBA 0x1592
-#define PCI_DEVICE_ID_SYBA_2P_EPP 0x0782
-#define PCI_DEVICE_ID_SYBA_1P_ECP 0x0783
-
-#define PCI_VENDOR_ID_MORETON 0x15aa
-#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
-
-#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
-#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
-
-#define PCI_VENDOR_ID_MELLANOX 0x15b3
-#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44
-#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46
-#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
-#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282
-#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
-#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
-
-#define PCI_VENDOR_ID_PDC 0x15e9
-
-
-#define PCI_VENDOR_ID_FARSITE 0x1619
-#define PCI_DEVICE_ID_FARSITE_T2P 0x0400
-#define PCI_DEVICE_ID_FARSITE_T4P 0x0440
-#define PCI_DEVICE_ID_FARSITE_T1U 0x0610
-#define PCI_DEVICE_ID_FARSITE_T2U 0x0620
-#define PCI_DEVICE_ID_FARSITE_T4U 0x0640
-#define PCI_DEVICE_ID_FARSITE_TE1 0x1610
-#define PCI_DEVICE_ID_FARSITE_TE1C 0x1612
-
-#define PCI_VENDOR_ID_SIBYTE 0x166d
-#define PCI_DEVICE_ID_BCM1250_PCI 0x0001
-#define PCI_DEVICE_ID_BCM1250_HT 0x0002
-
-#define PCI_VENDOR_ID_NETCELL 0x169c
-#define PCI_DEVICE_ID_REVOLUTION 0x0044
-
-#define PCI_VENDOR_ID_VITESSE 0x1725
-#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
-
-#define PCI_VENDOR_ID_LINKSYS 0x1737
-#define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064
-
-#define PCI_VENDOR_ID_ALTIMA 0x173b
-#define PCI_DEVICE_ID_ALTIMA_AC1000 0x03e8
-#define PCI_DEVICE_ID_ALTIMA_AC1001 0x03e9
-#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea
-#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb
-
-#define PCI_VENDOR_ID_ARECA 0x17d3
-#define PCI_DEVICE_ID_ARECA_1110 0x1110
-#define PCI_DEVICE_ID_ARECA_1120 0x1120
-#define PCI_DEVICE_ID_ARECA_1130 0x1130
-#define PCI_DEVICE_ID_ARECA_1160 0x1160
-#define PCI_DEVICE_ID_ARECA_1170 0x1170
-#define PCI_DEVICE_ID_ARECA_1210 0x1210
-#define PCI_DEVICE_ID_ARECA_1220 0x1220
-#define PCI_DEVICE_ID_ARECA_1230 0x1230
-#define PCI_DEVICE_ID_ARECA_1260 0x1260
-#define PCI_DEVICE_ID_ARECA_1270 0x1270
-#define PCI_DEVICE_ID_ARECA_1280 0x1280
-#define PCI_DEVICE_ID_ARECA_1380 0x1380
-#define PCI_DEVICE_ID_ARECA_1381 0x1381
-#define PCI_DEVICE_ID_ARECA_1680 0x1680
-#define PCI_DEVICE_ID_ARECA_1681 0x1681
-
-#define PCI_VENDOR_ID_S2IO 0x17d5
-#define PCI_DEVICE_ID_S2IO_WIN 0x5731
-#define PCI_DEVICE_ID_S2IO_UNI 0x5831
-#define PCI_DEVICE_ID_HERC_WIN 0x5732
-#define PCI_DEVICE_ID_HERC_UNI 0x5832
-
-
-#define PCI_VENDOR_ID_SITECOM 0x182d
-#define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069
-
-#define PCI_VENDOR_ID_TOPSPIN 0x1867
-
-#define PCI_VENDOR_ID_TDI 0x192E
-#define PCI_DEVICE_ID_TDI_EHCI 0x0101
-
-#define PCI_VENDOR_ID_JMICRON 0x197B
-#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
-#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
-#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
-#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
-#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
-#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
-
-#define PCI_VENDOR_ID_TEKRAM 0x1de1
-#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
-
-#define PCI_VENDOR_ID_HINT 0x3388
-#define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013
-
-#define PCI_VENDOR_ID_3DLABS 0x3d3d
-#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007
-#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009
-
-
-#define PCI_VENDOR_ID_AKS 0x416c
-#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100
-
-
-
-#define PCI_VENDOR_ID_S3 0x5333
-#define PCI_DEVICE_ID_S3_TRIO 0x8811
-#define PCI_DEVICE_ID_S3_868 0x8880
-#define PCI_DEVICE_ID_S3_968 0x88f0
-#define PCI_DEVICE_ID_S3_SAVAGE4 0x8a25
-#define PCI_DEVICE_ID_S3_PROSAVAGE8 0x8d04
-#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00
-
-#define PCI_VENDOR_ID_DUNORD 0x5544
-#define PCI_DEVICE_ID_DUNORD_I3000 0x0001
-
-
-#define PCI_VENDOR_ID_DCI 0x6666
-#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001
-#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002
-#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004
-
-#define PCI_VENDOR_ID_INTEL 0x8086
-#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
-#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
-#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
-#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
-#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
-#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
-#define PCI_DEVICE_ID_INTEL_82375 0x0482
-#define PCI_DEVICE_ID_INTEL_82424 0x0483
-#define PCI_DEVICE_ID_INTEL_82378 0x0484
-#define PCI_DEVICE_ID_INTEL_I960 0x0960
-#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
-#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
-#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
-#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
-#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
-#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
-#define PCI_DEVICE_ID_INTEL_82437 0x122d
-#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
-#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230
-#define PCI_DEVICE_ID_INTEL_82371MX 0x1234
-#define PCI_DEVICE_ID_INTEL_82441 0x1237
-#define PCI_DEVICE_ID_INTEL_82380FB 0x124b
-#define PCI_DEVICE_ID_INTEL_82439 0x1250
-#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
-#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
-#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
-#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
-#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
-#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
-#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
-#define PCI_DEVICE_ID_INTEL_82801AA_5 0x2415
-#define PCI_DEVICE_ID_INTEL_82801AA_6 0x2416
-#define PCI_DEVICE_ID_INTEL_82801AA_8 0x2418
-#define PCI_DEVICE_ID_INTEL_82801AB_0 0x2420
-#define PCI_DEVICE_ID_INTEL_82801AB_1 0x2421
-#define PCI_DEVICE_ID_INTEL_82801AB_3 0x2423
-#define PCI_DEVICE_ID_INTEL_82801AB_5 0x2425
-#define PCI_DEVICE_ID_INTEL_82801AB_6 0x2426
-#define PCI_DEVICE_ID_INTEL_82801AB_8 0x2428
-#define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440
-#define PCI_DEVICE_ID_INTEL_82801BA_2 0x2443
-#define PCI_DEVICE_ID_INTEL_82801BA_4 0x2445
-#define PCI_DEVICE_ID_INTEL_82801BA_6 0x2448
-#define PCI_DEVICE_ID_INTEL_82801BA_8 0x244a
-#define PCI_DEVICE_ID_INTEL_82801BA_9 0x244b
-#define PCI_DEVICE_ID_INTEL_82801BA_10 0x244c
-#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e
-#define PCI_DEVICE_ID_INTEL_82801E_0 0x2450
-#define PCI_DEVICE_ID_INTEL_82801E_11 0x245b
-#define PCI_DEVICE_ID_INTEL_82801CA_0 0x2480
-#define PCI_DEVICE_ID_INTEL_82801CA_3 0x2483
-#define PCI_DEVICE_ID_INTEL_82801CA_5 0x2485
-#define PCI_DEVICE_ID_INTEL_82801CA_6 0x2486
-#define PCI_DEVICE_ID_INTEL_82801CA_10 0x248a
-#define PCI_DEVICE_ID_INTEL_82801CA_11 0x248b
-#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c
-#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0
-#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1
-#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3
-#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5
-#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6
-#define PCI_DEVICE_ID_INTEL_82801DB_9 0x24c9
-#define PCI_DEVICE_ID_INTEL_82801DB_10 0x24ca
-#define PCI_DEVICE_ID_INTEL_82801DB_11 0x24cb
-#define PCI_DEVICE_ID_INTEL_82801DB_12 0x24cc
-#define PCI_DEVICE_ID_INTEL_82801EB_0 0x24d0
-#define PCI_DEVICE_ID_INTEL_82801EB_1 0x24d1
-#define PCI_DEVICE_ID_INTEL_82801EB_3 0x24d3
-#define PCI_DEVICE_ID_INTEL_82801EB_5 0x24d5
-#define PCI_DEVICE_ID_INTEL_82801EB_6 0x24d6
-#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db
-#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd
-#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
-#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
-#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
-#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
-#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
-#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
-#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
-#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
-#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531
-#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c
-#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
-#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
-#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
-#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572
-#define PCI_DEVICE_ID_INTEL_82875_HB 0x2578
-#define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580
-#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
-#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590
-#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
-#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
-#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
-#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
-#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
-#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
-#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
-#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
-#define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a
-#define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d
-#define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e
-#define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f
-#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670
-#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698
-#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b
-#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e
-#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
-#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
-#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
-#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd
-#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da
-#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd
-#define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de
-#define PCI_DEVICE_ID_INTEL_ICH7_21 0x27df
-#define PCI_DEVICE_ID_INTEL_ICH8_0 0x2810
-#define PCI_DEVICE_ID_INTEL_ICH8_1 0x2811
-#define PCI_DEVICE_ID_INTEL_ICH8_2 0x2812
-#define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814
-#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
-#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
-#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
-#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
-#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
-#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
-#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
-#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
-#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
-#define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592
-#define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595
-#define PCI_DEVICE_ID_INTEL_MCH_PA1 0x3596
-#define PCI_DEVICE_ID_INTEL_MCH_PB 0x3597
-#define PCI_DEVICE_ID_INTEL_MCH_PB1 0x3598
-#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
-#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
-#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
-#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
-#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
-#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
-#define PCI_DEVICE_ID_INTEL_82437VX 0x7030
-#define PCI_DEVICE_ID_INTEL_82439TX 0x7100
-#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
-#define PCI_DEVICE_ID_INTEL_82371AB 0x7111
-#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112
-#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113
-#define PCI_DEVICE_ID_INTEL_82810_MC1 0x7120
-#define PCI_DEVICE_ID_INTEL_82810_IG1 0x7121
-#define PCI_DEVICE_ID_INTEL_82810_MC3 0x7122
-#define PCI_DEVICE_ID_INTEL_82810_IG3 0x7123
-#define PCI_DEVICE_ID_INTEL_82810E_MC 0x7124
-#define PCI_DEVICE_ID_INTEL_82810E_IG 0x7125
-#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180
-#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181
-#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190
-#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191
-#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192
-#define PCI_DEVICE_ID_INTEL_440MX 0x7195
-#define PCI_DEVICE_ID_INTEL_440MX_6 0x7196
-#define PCI_DEVICE_ID_INTEL_82443MX_0 0x7198
-#define PCI_DEVICE_ID_INTEL_82443MX_1 0x7199
-#define PCI_DEVICE_ID_INTEL_82443MX_3 0x719b
-#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
-#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2
-#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
-#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4
-#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
-#define PCI_DEVICE_ID_INTEL_82451NX 0x84ca
-#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb
-#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
-#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
-#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
-#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
-
-#define PCI_VENDOR_ID_SCALEMP 0x8686
-#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010
-
-#define PCI_VENDOR_ID_COMPUTONE 0x8e0e
-#define PCI_DEVICE_ID_COMPUTONE_IP2EX 0x0291
-#define PCI_DEVICE_ID_COMPUTONE_PG 0x0302
-#define PCI_SUBVENDOR_ID_COMPUTONE 0x8e0e
-#define PCI_SUBDEVICE_ID_COMPUTONE_PG4 0x0001
-#define PCI_SUBDEVICE_ID_COMPUTONE_PG8 0x0002
-#define PCI_SUBDEVICE_ID_COMPUTONE_PG6 0x0003
-
-#define PCI_VENDOR_ID_KTI 0x8e2e
-
-#define PCI_VENDOR_ID_ADAPTEC 0x9004
-#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078
-#define PCI_DEVICE_ID_ADAPTEC_7821 0x2178
-#define PCI_DEVICE_ID_ADAPTEC_38602 0x3860
-#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078
-#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578
-#define PCI_DEVICE_ID_ADAPTEC_3860 0x6038
-#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
-#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078
-#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178
-#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078
-#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178
-#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278
-#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378
-#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478
-#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895
-#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078
-#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178
-#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278
-#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378
-#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478
-#define PCI_DEVICE_ID_ADAPTEC_7885 0x8578
-#define PCI_DEVICE_ID_ADAPTEC_7886 0x8678
-#define PCI_DEVICE_ID_ADAPTEC_7887 0x8778
-#define PCI_DEVICE_ID_ADAPTEC_7888 0x8878
-
-#define PCI_VENDOR_ID_ADAPTEC2 0x9005
-#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010
-#define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011
-#define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013
-#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f
-#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050
-#define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051
-#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f
-#define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080
-#define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081
-#define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083
-#define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f
-#define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0
-#define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1
-#define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3
-#define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf
-#define PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN 0x0500
-#define PCI_DEVICE_ID_ADAPTEC2_SCAMP 0x0503
-
-
-#define PCI_VENDOR_ID_HOLTEK 0x9412
-#define PCI_DEVICE_ID_HOLTEK_6565 0x6565
-
-#define PCI_VENDOR_ID_NETMOS 0x9710
-#define PCI_DEVICE_ID_NETMOS_9705 0x9705
-#define PCI_DEVICE_ID_NETMOS_9715 0x9715
-#define PCI_DEVICE_ID_NETMOS_9735 0x9735
-#define PCI_DEVICE_ID_NETMOS_9745 0x9745
-#define PCI_DEVICE_ID_NETMOS_9755 0x9755
-#define PCI_DEVICE_ID_NETMOS_9805 0x9805
-#define PCI_DEVICE_ID_NETMOS_9815 0x9815
-#define PCI_DEVICE_ID_NETMOS_9835 0x9835
-#define PCI_DEVICE_ID_NETMOS_9845 0x9845
-#define PCI_DEVICE_ID_NETMOS_9855 0x9855
-
-#define PCI_SUBVENDOR_ID_EXSYS 0xd84d
-#define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014
-#define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055
-
-#define PCI_VENDOR_ID_TIGERJET 0xe159
-#define PCI_DEVICE_ID_TIGERJET_300 0x0001
-#define PCI_DEVICE_ID_TIGERJET_100 0x0002
-
-#define PCI_VENDOR_ID_TTTECH 0x0357
-#define PCI_DEVICE_ID_TTTECH_MC322 0x000A
-
-#define PCI_VENDOR_ID_XILINX_RME 0xea60
-#define PCI_DEVICE_ID_RME_DIGI32 0x9896
-#define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897
-#define PCI_DEVICE_ID_RME_DIGI32_8 0x9898
-
-#define PCI_VENDOR_ID_QUICKNET 0x15E2
-#define PCI_DEVICE_ID_QUICKNET_XJ 0x0500
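
A note for context: the vendor/device constants above exist so drivers can recognise hardware by its configuration-space IDs. A minimal sketch of that matching pattern, using two values from the list (the table layout and the match_id() helper are illustrative, not an API from this header):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Normally these come from pci_ids.h; repeated here so the
 * sketch stands alone. */
#define PCI_VENDOR_ID_REALTEK      0x10ec
#define PCI_DEVICE_ID_REALTEK_8139 0x8139

struct pci_match {
	uint16_t vendor, device;
	const char *name;
};

static const struct pci_match id_table[] = {
	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139, "RTL-8139" },
};

/* Return the table entry matching a probed function, or NULL. */
static const char *match_id(uint16_t vendor, uint16_t device)
{
	size_t i;

	for (i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++)
		if (id_table[i].vendor == vendor && id_table[i].device == device)
			return id_table[i].name;
	return NULL;
}

int main(void)
{
	const char *name = match_id(0x10ec, 0x8139);
	printf("%s\n", name ? name : "unknown device");
	return 0;
}
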
diff --git a/xen/include/asm-ia64/linux/pci_regs.h b/xen/include/asm-ia64/linux/pci_regs.h
deleted file mode 100644
index c05dc22641..0000000000
--- a/xen/include/asm-ia64/linux/pci_regs.h
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * pci_regs.h
- *
- * PCI standard defines
- * Copyright 1994, Drew Eckhardt
- * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
- *
- * For more information, please consult the following manuals (look at
- * http://www.pcisig.com/ for how to get them):
- *
- * PCI BIOS Specification
- * PCI Local Bus Specification
- * PCI to PCI Bridge Specification
- * PCI System Design Guide
- *
- * For hypertransport information, please consult the following manuals
- * from http://www.hypertransport.org
- *
- * The Hypertransport I/O Link Specification
- */
-
-#ifndef LINUX_PCI_REGS_H
-#define LINUX_PCI_REGS_H
-
-/*
- * Under PCI, each device has 256 bytes of configuration address space,
- * of which the first 64 bytes are standardized as follows:
- */
-#define PCI_VENDOR_ID 0x00 /* 16 bits */
-#define PCI_DEVICE_ID 0x02 /* 16 bits */
-#define PCI_COMMAND 0x04 /* 16 bits */
-#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
-#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
-#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
-#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */
-#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
-#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
-#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
-#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
-#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
-#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
-#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
-
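The command-register bits above are applied read-modify-write: read the 16-bit register, OR in the decode/master bits, write it back. A self-contained sketch that models one function's 256 bytes of configuration space as a plain array (cfg_read16()/cfg_write16() are stand-ins for real config-space accessors, not an API from this header):

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND        0x04
#define PCI_COMMAND_MEMORY 0x2
#define PCI_COMMAND_MASTER 0x4

/* Stand-in for one function's 256-byte config space. */
static uint8_t cfg[256];

static uint16_t cfg_read16(uint8_t reg)
{
	return (uint16_t)(cfg[reg] | (cfg[reg + 1] << 8));
}

static void cfg_write16(uint8_t reg, uint16_t v)
{
	cfg[reg] = v & 0xff;
	cfg[reg + 1] = v >> 8;
}

int main(void)
{
	uint16_t cmd = cfg_read16(PCI_COMMAND);

	cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; /* decode MMIO, allow DMA */
	cfg_write16(PCI_COMMAND, cmd);
	printf("command = 0x%04x\n", (unsigned)cfg_read16(PCI_COMMAND)); /* 0x0006 */
	return 0;
}
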
-#define PCI_STATUS 0x06 /* 16 bits */
-#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
-#define PCI_STATUS_66MHZ 0x20 /* Support 66 MHz PCI 2.1 bus */
-#define PCI_STATUS_UDF 0x40 /* Support User Definable Features [obsolete] */
-#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast back-to-back */
-#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
-#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
-#define PCI_STATUS_DEVSEL_FAST 0x000
-#define PCI_STATUS_DEVSEL_MEDIUM 0x200
-#define PCI_STATUS_DEVSEL_SLOW 0x400
-#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
-#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of target abort */
-#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
-#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
-#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
-
-#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */
-#define PCI_REVISION_ID 0x08 /* Revision ID */
-#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
-#define PCI_CLASS_DEVICE 0x0a /* Device class */
-
-#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
-#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
-#define PCI_HEADER_TYPE 0x0e /* 8 bits */
-#define PCI_HEADER_TYPE_NORMAL 0
-#define PCI_HEADER_TYPE_BRIDGE 1
-#define PCI_HEADER_TYPE_CARDBUS 2
-
-#define PCI_BIST 0x0f /* 8 bits */
-#define PCI_BIST_CODE_MASK 0x0f /* Return result */
-#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
-#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
-
-/*
- * Base addresses specify locations in memory or I/O space.
- * Decoded size can be determined by writing a value of
- * 0xffffffff to the register, and reading it back. Only
- * 1 bits are decoded.
- */
-#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
-#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */
-#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */
-#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
-#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
-#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
-#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
-#define PCI_BASE_ADDRESS_SPACE_IO 0x01
-#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
-#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
-#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
-#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M [obsolete] */
-#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
-#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
-#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL)
-#define PCI_BASE_ADDRESS_IO_MASK (~0x03UL)
-/* bit 1 is reserved if address_space = 1 */
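A sketch of the sizing procedure the comment above describes, for a 32-bit memory BAR (pdev is an assumed struct pci_dev *; a real implementation must also handle I/O-space and 64-bit BARs):

	u32 orig, sz;

	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &orig);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, 0xffffffff);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &sz);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, orig);	/* restore */

	sz &= PCI_BASE_ADDRESS_MEM_MASK;	/* drop the flag bits */
	sz = ~sz + 1;				/* only 1 bits decode, so size is the two's complement */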
-
-/* Header type 0 (normal devices) */
-#define PCI_CARDBUS_CIS 0x28
-#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
-#define PCI_SUBSYSTEM_ID 0x2e
-#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
-#define PCI_ROM_ADDRESS_ENABLE 0x01
-#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
-
-#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
-
-/* 0x35-0x3b are reserved */
-#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
-#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
-#define PCI_MIN_GNT 0x3e /* 8 bits */
-#define PCI_MAX_LAT 0x3f /* 8 bits */
-
-/* Header type 1 (PCI-to-PCI bridges) */
-#define PCI_PRIMARY_BUS 0x18 /* Primary bus number */
-#define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */
-#define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */
-#define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */
-#define PCI_IO_BASE 0x1c /* I/O range behind the bridge */
-#define PCI_IO_LIMIT 0x1d
-#define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */
-#define PCI_IO_RANGE_TYPE_16 0x00
-#define PCI_IO_RANGE_TYPE_32 0x01
-#define PCI_IO_RANGE_MASK (~0x0fUL)
-#define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */
-#define PCI_MEMORY_BASE 0x20 /* Memory range behind */
-#define PCI_MEMORY_LIMIT 0x22
-#define PCI_MEMORY_RANGE_TYPE_MASK 0x0fUL
-#define PCI_MEMORY_RANGE_MASK (~0x0fUL)
-#define PCI_PREF_MEMORY_BASE 0x24 /* Prefetchable memory range behind */
-#define PCI_PREF_MEMORY_LIMIT 0x26
-#define PCI_PREF_RANGE_TYPE_MASK 0x0fUL
-#define PCI_PREF_RANGE_TYPE_32 0x00
-#define PCI_PREF_RANGE_TYPE_64 0x01
-#define PCI_PREF_RANGE_MASK (~0x0fUL)
-#define PCI_PREF_BASE_UPPER32 0x28 /* Upper half of prefetchable memory range */
-#define PCI_PREF_LIMIT_UPPER32 0x2c
-#define PCI_IO_BASE_UPPER16 0x30 /* Upper half of I/O addresses */
-#define PCI_IO_LIMIT_UPPER16 0x32
-/* 0x34 same as for htype 0 */
-/* 0x35-0x3b is reserved */
-#define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */
-/* 0x3c-0x3d are same as for htype 0 */
-#define PCI_BRIDGE_CONTROL 0x3e
-#define PCI_BRIDGE_CTL_PARITY 0x01 /* Enable parity detection on secondary interface */
-#define PCI_BRIDGE_CTL_SERR 0x02 /* The same for SERR forwarding */
-#define PCI_BRIDGE_CTL_NO_ISA 0x04 /* Disable bridging of ISA ports */
-#define PCI_BRIDGE_CTL_VGA 0x08 /* Forward VGA addresses */
-#define PCI_BRIDGE_CTL_MASTER_ABORT 0x20 /* Report master aborts */
-#define PCI_BRIDGE_CTL_BUS_RESET 0x40 /* Secondary bus reset */
-#define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */
-
-/* Header type 2 (CardBus bridges) */
-#define PCI_CB_CAPABILITY_LIST 0x14
-/* 0x15 reserved */
-#define PCI_CB_SEC_STATUS 0x16 /* Secondary status */
-#define PCI_CB_PRIMARY_BUS 0x18 /* PCI bus number */
-#define PCI_CB_CARD_BUS 0x19 /* CardBus bus number */
-#define PCI_CB_SUBORDINATE_BUS 0x1a /* Subordinate bus number */
-#define PCI_CB_LATENCY_TIMER 0x1b /* CardBus latency timer */
-#define PCI_CB_MEMORY_BASE_0 0x1c
-#define PCI_CB_MEMORY_LIMIT_0 0x20
-#define PCI_CB_MEMORY_BASE_1 0x24
-#define PCI_CB_MEMORY_LIMIT_1 0x28
-#define PCI_CB_IO_BASE_0 0x2c
-#define PCI_CB_IO_BASE_0_HI 0x2e
-#define PCI_CB_IO_LIMIT_0 0x30
-#define PCI_CB_IO_LIMIT_0_HI 0x32
-#define PCI_CB_IO_BASE_1 0x34
-#define PCI_CB_IO_BASE_1_HI 0x36
-#define PCI_CB_IO_LIMIT_1 0x38
-#define PCI_CB_IO_LIMIT_1_HI 0x3a
-#define PCI_CB_IO_RANGE_MASK (~0x03UL)
-/* 0x3c-0x3d are same as for htype 0 */
-#define PCI_CB_BRIDGE_CONTROL 0x3e
-#define PCI_CB_BRIDGE_CTL_PARITY 0x01 /* Similar to standard bridge control register */
-#define PCI_CB_BRIDGE_CTL_SERR 0x02
-#define PCI_CB_BRIDGE_CTL_ISA 0x04
-#define PCI_CB_BRIDGE_CTL_VGA 0x08
-#define PCI_CB_BRIDGE_CTL_MASTER_ABORT 0x20
-#define PCI_CB_BRIDGE_CTL_CB_RESET 0x40 /* CardBus reset */
-#define PCI_CB_BRIDGE_CTL_16BIT_INT 0x80 /* Enable interrupt for 16-bit cards */
-#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100 /* Prefetch enable for both memory regions */
-#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200
-#define PCI_CB_BRIDGE_CTL_POST_WRITES 0x400
-#define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40
-#define PCI_CB_SUBSYSTEM_ID 0x42
-#define PCI_CB_LEGACY_MODE_BASE 0x44 /* 16-bit PC Card legacy mode base address (ExCa) */
-/* 0x48-0x7f reserved */
-
-/* Capability lists */
-
-#define PCI_CAP_LIST_ID 0 /* Capability ID */
-#define PCI_CAP_ID_PM 0x01 /* Power Management */
-#define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */
-#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */
-#define PCI_CAP_ID_SLOTID 0x04 /* Slot Identification */
-#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */
-#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */
-#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */
-#define PCI_CAP_ID_HT 0x08 /* HyperTransport */
-#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific capability */
-#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
-#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
-#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
-#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
-#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
-#define PCI_CAP_SIZEOF 4
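The list head lives at PCI_CAPABILITY_LIST (valid only when PCI_STATUS_CAP_LIST is set in the status register), and each entry chains through PCI_CAP_LIST_NEXT. A bounded walk looking for the MSI capability might be sketched as:

	u8 pos, id;
	int ttl = 48;	/* generous bound against malformed or looping lists */

	pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
	while (pos >= 0x40 && ttl--) {
		pci_read_config_byte(pdev, pos + PCI_CAP_LIST_ID, &id);
		if (id == PCI_CAP_ID_MSI)
			break;		/* MSI capability found at offset 'pos' */
		pci_read_config_byte(pdev, pos + PCI_CAP_LIST_NEXT, &pos);
	}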
-
-/* Power Management Registers */
-
-#define PCI_PM_PMC 2 /* PM Capabilities Register */
-#define PCI_PM_CAP_VER_MASK 0x0007 /* Version */
-#define PCI_PM_CAP_PME_CLOCK 0x0008 /* PME clock required */
-#define PCI_PM_CAP_RESERVED 0x0010 /* Reserved field */
-#define PCI_PM_CAP_DSI 0x0020 /* Device specific initialization */
-#define PCI_PM_CAP_AUX_POWER 0x01C0 /* Auxiliary power support mask */
-#define PCI_PM_CAP_D1 0x0200 /* D1 power state support */
-#define PCI_PM_CAP_D2 0x0400 /* D2 power state support */
-#define PCI_PM_CAP_PME 0x0800 /* PME pin supported */
-#define PCI_PM_CAP_PME_MASK 0xF800 /* PME Mask of all supported states */
-#define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */
-#define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */
-#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
-#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
-#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
-#define PCI_PM_CTRL 4 /* PM control and status register */
-#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
-#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */
-#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
-#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */
-#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */
-#define PCI_PM_CTRL_PME_STATUS 0x8000 /* PME pin status */
-#define PCI_PM_PPB_EXTENSIONS 6 /* PPB support extensions (??) */
-#define PCI_PM_PPB_B2_B3 0x40 /* Stop clock when in D3hot (??) */
-#define PCI_PM_BPCC_ENABLE 0x80 /* Bus power/clock control enable (??) */
-#define PCI_PM_DATA_REGISTER 7 /* (??) */
-#define PCI_PM_SIZEOF 8
-
-/* AGP registers */
-
-#define PCI_AGP_VERSION 2 /* BCD version number */
-#define PCI_AGP_RFU 3 /* Rest of capability flags */
-#define PCI_AGP_STATUS 4 /* Status register */
-#define PCI_AGP_STATUS_RQ_MASK 0xff000000 /* Maximum number of requests - 1 */
-#define PCI_AGP_STATUS_SBA 0x0200 /* Sideband addressing supported */
-#define PCI_AGP_STATUS_64BIT 0x0020 /* 64-bit addressing supported */
-#define PCI_AGP_STATUS_FW 0x0010 /* FW transfers supported */
-#define PCI_AGP_STATUS_RATE4 0x0004 /* 4x transfer rate supported */
-#define PCI_AGP_STATUS_RATE2 0x0002 /* 2x transfer rate supported */
-#define PCI_AGP_STATUS_RATE1 0x0001 /* 1x transfer rate supported */
-#define PCI_AGP_COMMAND 8 /* Control register */
-#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */
-#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */
-#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */
-#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */
-#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */
-#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */
-#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */
-#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */
-#define PCI_AGP_SIZEOF 12
-
-/* Vital Product Data */
-
-#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) */
-#define PCI_VPD_ADDR_MASK 0x7fff /* Address mask */
-#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
-#define PCI_VPD_DATA 4 /* 32-bits of data returned here */
-
-/* Slot Identification */
-
-#define PCI_SID_ESR 2 /* Expansion Slot Register */
-#define PCI_SID_ESR_NSLOTS 0x1f /* Number of expansion slots available */
-#define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */
-#define PCI_SID_CHASSIS_NR 3 /* Chassis Number */
-
-/* Message Signalled Interrupts registers */
-
-#define PCI_MSI_FLAGS 2 /* Various flags */
-#define PCI_MSI_FLAGS_64BIT 0x80 /* 64-bit addresses allowed */
-#define PCI_MSI_FLAGS_QSIZE 0x70 /* Message queue size configured */
-#define PCI_MSI_FLAGS_QMASK 0x0e /* Maximum queue size available */
-#define PCI_MSI_FLAGS_ENABLE 0x01 /* MSI feature enabled */
-#define PCI_MSI_FLAGS_MASKBIT 0x100 /* 64-bit mask bits allowed */
-#define PCI_MSI_RFU 3 /* Rest of capability flags */
-#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
-#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
-#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
-#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
-#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
-
-/* CompactPCI Hotswap Register */
-
-#define PCI_CHSWP_CSR 2 /* Control and Status Register */
-#define PCI_CHSWP_DHA 0x01 /* Device Hiding Arm */
-#define PCI_CHSWP_EIM 0x02 /* ENUM# Signal Mask */
-#define PCI_CHSWP_PIE 0x04 /* Pending Insert or Extract */
-#define PCI_CHSWP_LOO 0x08 /* LED On / Off */
-#define PCI_CHSWP_PI 0x30 /* Programming Interface */
-#define PCI_CHSWP_EXT 0x40 /* ENUM# status - extraction */
-#define PCI_CHSWP_INS 0x80 /* ENUM# status - insertion */
-
-/* PCI-X registers */
-
-#define PCI_X_CMD 2 /* Modes & Features */
-#define PCI_X_CMD_DPERR_E 0x0001 /* Data Parity Error Recovery Enable */
-#define PCI_X_CMD_ERO 0x0002 /* Enable Relaxed Ordering */
-#define PCI_X_CMD_MAX_READ 0x000c /* Max Memory Read Byte Count */
-#define PCI_X_CMD_MAX_SPLIT 0x0070 /* Max Outstanding Split Transactions */
-#define PCI_X_CMD_VERSION(x) (((x) >> 12) & 3) /* Version */
-#define PCI_X_STATUS 4 /* PCI-X capabilities */
-#define PCI_X_STATUS_DEVFN 0x000000ff /* A copy of devfn */
-#define PCI_X_STATUS_BUS 0x0000ff00 /* A copy of bus nr */
-#define PCI_X_STATUS_64BIT 0x00010000 /* 64-bit device */
-#define PCI_X_STATUS_133MHZ 0x00020000 /* 133 MHz capable */
-#define PCI_X_STATUS_SPL_DISC 0x00040000 /* Split Completion Discarded */
-#define PCI_X_STATUS_UNX_SPL 0x00080000 /* Unexpected Split Completion */
-#define PCI_X_STATUS_COMPLEX 0x00100000 /* Device Complexity */
-#define PCI_X_STATUS_MAX_READ 0x00600000 /* Designed Max Memory Read Count */
-#define PCI_X_STATUS_MAX_SPLIT 0x03800000 /* Designed Max Outstanding Split Transactions */
-#define PCI_X_STATUS_MAX_CUM 0x1c000000 /* Designed Max Cumulative Read Size */
-#define PCI_X_STATUS_SPL_ERR 0x20000000 /* Rcvd Split Completion Error Msg */
-#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */
-#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */
-
-/* PCI Express capability registers */
-
-#define PCI_EXP_FLAGS 2 /* Capabilities register */
-#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
-#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
-#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
-#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
-#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
-#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
-#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
-#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
-#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
-#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
-#define PCI_EXP_DEVCAP 4 /* Device capabilities */
-#define PCI_EXP_DEVCAP_PAYLOAD 0x07 /* Max_Payload_Size */
-#define PCI_EXP_DEVCAP_PHANTOM 0x18 /* Phantom functions */
-#define PCI_EXP_DEVCAP_EXT_TAG 0x20 /* Extended tags */
-#define PCI_EXP_DEVCAP_L0S 0x1c0 /* L0s Acceptable Latency */
-#define PCI_EXP_DEVCAP_L1 0xe00 /* L1 Acceptable Latency */
-#define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */
-#define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */
-#define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */
-#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */
-#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */
-#define PCI_EXP_DEVCTL 8 /* Device Control */
-#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
-#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
-#define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */
-#define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */
-#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
-#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
-#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
-#define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */
-#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
-#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
-#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
-#define PCI_EXP_DEVSTA 10 /* Device Status */
-#define PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */
-#define PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */
-#define PCI_EXP_DEVSTA_FED 0x04 /* Fatal Error Detected */
-#define PCI_EXP_DEVSTA_URD 0x08 /* Unsupported Request Detected */
-#define PCI_EXP_DEVSTA_AUXPD 0x10 /* AUX Power Detected */
-#define PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */
-#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
-#define PCI_EXP_LNKCTL 16 /* Link Control */
-#define PCI_EXP_LNKCTL_CLKREQ_EN 0x100 /* Enable clkreq */
-#define PCI_EXP_LNKSTA 18 /* Link Status */
-#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
-#define PCI_EXP_SLTCTL 24 /* Slot Control */
-#define PCI_EXP_SLTSTA 26 /* Slot Status */
-#define PCI_EXP_RTCTL 28 /* Root Control */
-#define PCI_EXP_RTCTL_SECEE 0x01 /* System Error on Correctable Error */
-#define PCI_EXP_RTCTL_SENFEE 0x02 /* System Error on Non-Fatal Error */
-#define PCI_EXP_RTCTL_SEFEE 0x04 /* System Error on Fatal Error */
-#define PCI_EXP_RTCTL_PMEIE 0x08 /* PME Interrupt Enable */
-#define PCI_EXP_RTCTL_CRSSVE 0x10 /* CRS Software Visibility Enable */
-#define PCI_EXP_RTCAP 30 /* Root Capabilities */
-#define PCI_EXP_RTSTA 32 /* Root Status */
-
-/* Extended Capabilities (PCI-X 2.0 and Express) */
-#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
-#define PCI_EXT_CAP_VER(header) ((header >> 16) & 0xf)
-#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
-
-#define PCI_EXT_CAP_ID_ERR 1
-#define PCI_EXT_CAP_ID_VC 2
-#define PCI_EXT_CAP_ID_DSN 3
-#define PCI_EXT_CAP_ID_PWR 4
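Extended capabilities start at offset 0x100 of Express config space and chain through the header's next-pointer field; a sketch of locating Advanced Error Reporting with the macros above:

	u32 header;
	int pos = 0x100;	/* first extended capability header */

	do {
		pci_read_config_dword(pdev, pos, &header);
		if (!header)
			break;		/* no extended capabilities at all */
		if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_ERR)
			break;		/* AER found at offset 'pos' */
		pos = PCI_EXT_CAP_NEXT(header);
	} while (pos);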
-
-/* Advanced Error Reporting */
-#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
-#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */
-#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
-#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
-#define PCI_ERR_UNC_FCP 0x00002000 /* Flow Control Protocol */
-#define PCI_ERR_UNC_COMP_TIME 0x00004000 /* Completion Timeout */
-#define PCI_ERR_UNC_COMP_ABORT 0x00008000 /* Completer Abort */
-#define PCI_ERR_UNC_UNX_COMP 0x00010000 /* Unexpected Completion */
-#define PCI_ERR_UNC_RX_OVER 0x00020000 /* Receiver Overflow */
-#define PCI_ERR_UNC_MALF_TLP 0x00040000 /* Malformed TLP */
-#define PCI_ERR_UNC_ECRC 0x00080000 /* ECRC Error Status */
-#define PCI_ERR_UNC_UNSUP 0x00100000 /* Unsupported Request */
-#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
- /* Same bits as above */
-#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
- /* Same bits as above */
-#define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */
-#define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */
-#define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */
-#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
-#define PCI_ERR_COR_REP_ROLL 0x00000100 /* REPLAY_NUM Rollover */
-#define PCI_ERR_COR_REP_TIMER 0x00001000 /* Replay Timer Timeout */
-#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
- /* Same bits as above */
-#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
-#define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */
-#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
-#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
-#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
-#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
-#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
-#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
-/* Correctable Err Reporting Enable */
-#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001
-/* Non-fatal Err Reporting Enable */
-#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002
-/* Fatal Err Reporting Enable */
-#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004
-#define PCI_ERR_ROOT_STATUS 48
-#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
-/* Multi ERR_COR Received */
-#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002
-/* ERR_FATAL/NONFATAL Received */
-#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004
-/* Multi ERR_FATAL/NONFATAL Received */
-#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008
-#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First Fatal */
-#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
-#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
-#define PCI_ERR_ROOT_COR_SRC 52
-#define PCI_ERR_ROOT_SRC 54
-
-/* Virtual Channel */
-#define PCI_VC_PORT_REG1 4
-#define PCI_VC_PORT_REG2 8
-#define PCI_VC_PORT_CTRL 12
-#define PCI_VC_PORT_STATUS 14
-#define PCI_VC_RES_CAP 16
-#define PCI_VC_RES_CTRL 20
-#define PCI_VC_RES_STATUS 26
-
-/* Power Budgeting */
-#define PCI_PWR_DSR 4 /* Data Select Register */
-#define PCI_PWR_DATA 8 /* Data Register */
-#define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */
-#define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */
-#define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */
-#define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
-#define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */
-#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
-#define PCI_PWR_CAP 12 /* Capability */
-#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
-
-/* Hypertransport sub capability types */
-#define HT_CAPTYPE_SLAVE 0x00 /* Slave/Primary link configuration */
-#define HT_CAPTYPE_HOST 0x20 /* Host/Secondary link configuration */
-#define HT_CAPTYPE_IRQ 0x80 /* IRQ Configuration */
-#define HT_CAPTYPE_REMAPPING_40 0xA0 /* 40 bit address remapping */
-#define HT_CAPTYPE_REMAPPING_64 0xA2 /* 64 bit address remapping */
-#define HT_CAPTYPE_UNITID_CLUMP 0x90 /* Unit ID clumping */
-#define HT_CAPTYPE_EXTCONF 0x98 /* Extended Configuration Space Access */
-#define HT_CAPTYPE_MSI_MAPPING 0xA8 /* MSI Mapping Capability */
-#define HT_CAPTYPE_DIRECT_ROUTE 0xB0 /* Direct routing configuration */
-#define HT_CAPTYPE_VCSET 0xB8 /* Virtual Channel configuration */
-#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */
-#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */
-#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */
-
-
-#endif /* LINUX_PCI_REGS_H */
diff --git a/xen/include/asm-ia64/linux/percpu.h b/xen/include/asm-ia64/linux/percpu.h
deleted file mode 100644
index 5451eb1e78..0000000000
--- a/xen/include/asm-ia64/linux/percpu.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef __LINUX_PERCPU_H
-#define __LINUX_PERCPU_H
-#include <linux/spinlock.h> /* For preempt_disable() */
-#include <linux/slab.h> /* For kmalloc() */
-#include <linux/smp.h>
-#include <linux/string.h> /* For memset() */
-#include <asm/percpu.h>
-
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
-#define PERCPU_ENOUGH_ROOM 32768
-#endif
-
-/* Must be an lvalue. */
-#define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); }))
-#define put_cpu_var(var) preempt_enable()
-
-#ifdef CONFIG_SMP
-
-struct percpu_data {
- void *ptrs[NR_CPUS];
- void *blkp;
-};
-
-/*
- * Use this to get to a cpu's version of the per-cpu object allocated using
- * alloc_percpu. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define per_cpu_ptr(ptr, cpu) \
-({ \
- struct percpu_data *__p = (struct percpu_data *)~(unsigned long)(ptr); \
- (__typeof__(ptr))__p->ptrs[(cpu)]; \
-})
-
-extern void *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(const void *);
-
-#else /* CONFIG_SMP */
-
-#define per_cpu_ptr(ptr, cpu) (ptr)
-
-static inline void *__alloc_percpu(size_t size, size_t align)
-{
- void *ret = kmalloc(size, GFP_KERNEL);
- if (ret)
- memset(ret, 0, size);
- return ret;
-}
-static inline void free_percpu(const void *ptr)
-{
- kfree(ptr);
-}
-
-#endif /* CONFIG_SMP */
-
-/* Simple wrapper for the common case: zeros memory. */
-#define alloc_percpu(type) \
- ((type *)(__alloc_percpu(sizeof(type), __alignof__(type))))
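A sketch of the intended usage: allocate with alloc_percpu(), touch the local copy under get_cpu()/put_cpu() as the comment above suggests, and address other CPUs' copies with per_cpu_ptr() ('struct my_stats' is a hypothetical payload):

	struct my_stats { unsigned long packets; } *stats;
	int cpu;

	stats = alloc_percpu(struct my_stats);
	if (stats) {
		cpu = get_cpu();		/* disables preemption, pins us to this CPU */
		per_cpu_ptr(stats, cpu)->packets++;
		put_cpu();
		free_percpu(stats);
	}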
-
-#endif /* __LINUX_PERCPU_H */
diff --git a/xen/include/asm-ia64/linux/pm.h b/xen/include/asm-ia64/linux/pm.h
deleted file mode 100644
index 070394e846..0000000000
--- a/xen/include/asm-ia64/linux/pm.h
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * pm.h - Power management interface
- *
- * Copyright (C) 2000 Andrew Henroid
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _LINUX_PM_H
-#define _LINUX_PM_H
-
-#ifdef __KERNEL__
-
-#include <linux/list.h>
-#include <asm/atomic.h>
-
-/*
- * Power management requests... these are passed to pm_send_all() and friends.
- *
- * these functions are old and deprecated, see below.
- */
-typedef int __bitwise pm_request_t;
-
-#define PM_SUSPEND ((__force pm_request_t) 1) /* enter D1-D3 */
-#define PM_RESUME ((__force pm_request_t) 2) /* enter D0 */
-
-
-/*
- * Device types... these are passed to pm_register
- */
-typedef int __bitwise pm_dev_t;
-
-#define PM_UNKNOWN_DEV ((__force pm_dev_t) 0) /* generic */
-#define PM_SYS_DEV ((__force pm_dev_t) 1) /* system device (fan, KB controller, ...) */
-#define PM_PCI_DEV ((__force pm_dev_t) 2) /* PCI device */
-#define PM_USB_DEV ((__force pm_dev_t) 3) /* USB device */
-#define PM_SCSI_DEV ((__force pm_dev_t) 4) /* SCSI device */
-#define PM_ISA_DEV ((__force pm_dev_t) 5) /* ISA device */
-#define PM_MTD_DEV ((__force pm_dev_t) 6) /* Memory Technology Device */
-
-/*
- * System device hardware ID (PnP) values
- */
-enum
-{
- PM_SYS_UNKNOWN = 0x00000000, /* generic */
- PM_SYS_KBC = 0x41d00303, /* keyboard controller */
- PM_SYS_COM = 0x41d00500, /* serial port */
- PM_SYS_IRDA = 0x41d00510, /* IRDA controller */
- PM_SYS_FDC = 0x41d00700, /* floppy controller */
- PM_SYS_VGA = 0x41d00900, /* VGA controller */
- PM_SYS_PCMCIA = 0x41d00e00, /* PCMCIA controller */
-};
-
-/*
- * Device identifier
- */
-#define PM_PCI_ID(dev) ((dev)->bus->number << 16 | (dev)->devfn)
-
-/*
- * Request handler callback
- */
-struct pm_dev;
-
-typedef int (*pm_callback)(struct pm_dev *dev, pm_request_t rqst, void *data);
-
-/*
- * Dynamic device information
- */
-struct pm_dev
-{
- pm_dev_t type;
- unsigned long id;
- pm_callback callback;
- void *data;
-
- unsigned long flags;
- unsigned long state;
- unsigned long prev_state;
-
- struct list_head entry;
-};
-
-/* Functions above this comment are list-based old-style power
- * management. Please avoid using them. */
-
-/*
- * Callbacks for platform drivers to implement.
- */
-extern void (*pm_idle)(void);
-extern void (*pm_power_off)(void);
-
-typedef int __bitwise suspend_state_t;
-
-#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
-#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
-#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
-#define PM_SUSPEND_DISK ((__force suspend_state_t) 4)
-#define PM_SUSPEND_MAX ((__force suspend_state_t) 5)
-
-typedef int __bitwise suspend_disk_method_t;
-
-#define PM_DISK_FIRMWARE ((__force suspend_disk_method_t) 1)
-#define PM_DISK_PLATFORM ((__force suspend_disk_method_t) 2)
-#define PM_DISK_SHUTDOWN ((__force suspend_disk_method_t) 3)
-#define PM_DISK_REBOOT ((__force suspend_disk_method_t) 4)
-#define PM_DISK_TEST ((__force suspend_disk_method_t) 5)
-#define PM_DISK_TESTPROC ((__force suspend_disk_method_t) 6)
-#define PM_DISK_MAX ((__force suspend_disk_method_t) 7)
-
-struct pm_ops {
- suspend_disk_method_t pm_disk_mode;
- int (*valid)(suspend_state_t state);
- int (*prepare)(suspend_state_t state);
- int (*enter)(suspend_state_t state);
- int (*finish)(suspend_state_t state);
-};
-
-extern void pm_set_ops(struct pm_ops *);
-extern struct pm_ops *pm_ops;
-extern int pm_suspend(suspend_state_t state);
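A hypothetical platform registration against this interface, accepting only the sleep states the hardware supports; all names here are illustrative:

	static int example_pm_valid(suspend_state_t state)
	{
		return state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM;
	}

	static int example_pm_enter(suspend_state_t state)
	{
		/* program the platform's sleep state here */
		return 0;
	}

	static struct pm_ops example_pm_ops = {
		.pm_disk_mode	= PM_DISK_SHUTDOWN,
		.valid		= example_pm_valid,
		.enter		= example_pm_enter,
	};

	/* called once from platform init code: pm_set_ops(&example_pm_ops); */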
-
-
-/*
- * Device power management
- */
-
-struct device;
-
-typedef struct pm_message {
- int event;
-} pm_message_t;
-
-/*
- * Several driver power state transitions are externally visible, affecting
- * the state of pending I/O queues and (for drivers that touch hardware)
- * interrupts, wakeups, DMA, and other hardware state. There may also be
- * internal transitions to various low power modes, which are transparent
- * to the rest of the driver stack (such as a driver that's ON gating off
- * clocks which are not in active use).
- *
- * One transition is triggered by resume(), after a suspend() call; the
- * message is implicit:
- *
- * ON Driver starts working again, responding to hardware events
- * and software requests. The hardware may have gone through
- * a power-off reset, or it may have maintained state from the
- * previous suspend() which the driver will rely on while
- * resuming. On most platforms, there are no restrictions on
- * availability of resources like clocks during resume().
- *
- * Other transitions are triggered by messages sent using suspend(). All
- * these transitions quiesce the driver, so that I/O queues are inactive.
- * That commonly entails turning off IRQs and DMA; there may be rules
- * about how to quiesce that are specific to the bus or the device's type.
- * (For example, network drivers mark the link state.) Other details may
- * differ according to the message:
- *
- * SUSPEND Quiesce, enter a low power device state appropriate for
- * the upcoming system state (such as PCI_D3hot), and enable
- * wakeup events as appropriate.
- *
- * FREEZE Quiesce operations so that a consistent image can be saved;
- * but do NOT otherwise enter a low power device state, and do
- * NOT emit system wakeup events.
- *
- * PRETHAW Quiesce as if for FREEZE; additionally, prepare for restoring
- * the system from a snapshot taken after an earlier FREEZE.
- * Some drivers will need to reset their hardware state instead
- * of preserving it, to ensure that it's never mistaken for the
- * state which that earlier snapshot had set up.
- *
- * A minimally power-aware driver treats all messages as SUSPEND, fully
- * reinitializes its device during resume() -- whether or not it was reset
- * during the suspend/resume cycle -- and can't issue wakeup events.
- *
- * More power-aware drivers may also use low power states at runtime as
- * well as during system sleep states like PM_SUSPEND_STANDBY. They may
- * be able to use wakeup events to exit from runtime low-power states,
- * or from system low-power states such as standby or suspend-to-RAM.
- */
-
-#define PM_EVENT_ON 0
-#define PM_EVENT_FREEZE 1
-#define PM_EVENT_SUSPEND 2
-#define PM_EVENT_PRETHAW 3
-
-#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
-#define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, })
-#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
-#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
-
-struct dev_pm_info {
- pm_message_t power_state;
- unsigned can_wakeup:1;
-#ifdef CONFIG_PM
- unsigned should_wakeup:1;
- pm_message_t prev_state;
- void * saved_state;
- struct device * pm_parent;
- struct list_head entry;
-#endif
-};
-
-extern void device_pm_set_parent(struct device * dev, struct device * parent);
-
-extern int device_power_down(pm_message_t state);
-extern void device_power_up(void);
-extern void device_resume(void);
-
-#ifdef CONFIG_PM
-extern suspend_disk_method_t pm_disk_mode;
-
-extern int device_suspend(pm_message_t state);
-extern int device_prepare_suspend(pm_message_t state);
-
-#define device_set_wakeup_enable(dev,val) \
- ((dev)->power.should_wakeup = !!(val))
-#define device_may_wakeup(dev) \
- (device_can_wakeup(dev) && (dev)->power.should_wakeup)
-
-extern int dpm_runtime_suspend(struct device *, pm_message_t);
-extern void dpm_runtime_resume(struct device *);
-extern void __suspend_report_result(const char *function, void *fn, int ret);
-
-#define suspend_report_result(fn, ret) \
- do { \
- __suspend_report_result(__FUNCTION__, fn, ret); \
- } while (0)
-
-#else /* !CONFIG_PM */
-
-static inline int device_suspend(pm_message_t state)
-{
- return 0;
-}
-
-#define device_set_wakeup_enable(dev,val) do{}while(0)
-#define device_may_wakeup(dev) (0)
-
-static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state)
-{
- return 0;
-}
-
-static inline void dpm_runtime_resume(struct device * dev)
-{
-}
-
-#define suspend_report_result(fn, ret) do { } while (0)
-
-#endif
-
-/* changes to device_may_wakeup take effect on the next pm state change.
- * by default, devices should wakeup if they can.
- */
-#define device_can_wakeup(dev) \
- ((dev)->power.can_wakeup)
-#define device_init_wakeup(dev,val) \
- do { \
- device_can_wakeup(dev) = !!(val); \
- device_set_wakeup_enable(dev,val); \
- } while(0)
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_PM_H */
diff --git a/xen/include/asm-ia64/linux/preempt.h b/xen/include/asm-ia64/linux/preempt.h
deleted file mode 100644
index dd98c54a23..0000000000
--- a/xen/include/asm-ia64/linux/preempt.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __LINUX_PREEMPT_H
-#define __LINUX_PREEMPT_H
-
-/*
- * include/linux/preempt.h - macros for accessing and manipulating
- * preempt_count (used for kernel preemption, interrupt count, etc.)
- */
-
-#include <linux/config.h>
-#include <linux/linkage.h>
-
-#ifdef CONFIG_DEBUG_PREEMPT
- extern void fastcall add_preempt_count(int val);
- extern void fastcall sub_preempt_count(int val);
-#else
-# define add_preempt_count(val) do { preempt_count() += (val); } while (0)
-# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
-#endif
-
-#define inc_preempt_count() add_preempt_count(1)
-#define dec_preempt_count() sub_preempt_count(1)
-
-#define preempt_count() (current_thread_info()->preempt_count)
-
-#ifdef CONFIG_PREEMPT
-
-asmlinkage void preempt_schedule(void);
-
-#define preempt_disable() \
-do { \
- inc_preempt_count(); \
- barrier(); \
-} while (0)
-
-#define preempt_enable_no_resched() \
-do { \
- barrier(); \
- dec_preempt_count(); \
-} while (0)
-
-#define preempt_check_resched() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule(); \
-} while (0)
-
-#define preempt_enable() \
-do { \
- preempt_enable_no_resched(); \
- preempt_check_resched(); \
-} while (0)
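The disable/enable pair brackets any window where migration or preemption must not occur; a minimal sketch guarding a hypothetical per-CPU scratch variable:

	preempt_disable();		/* no preemption (and thus no migration) past here */
	__get_cpu_var(scratch)++;	/* 'scratch' is an assumed DEFINE_PER_CPU variable */
	preempt_enable();		/* may reschedule if TIF_NEED_RESCHED was set meanwhile */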
-
-#else
-
-#define preempt_disable() do { } while (0)
-#define preempt_enable_no_resched() do { } while (0)
-#define preempt_enable() do { } while (0)
-#define preempt_check_resched() do { } while (0)
-
-#endif
-
-#endif /* __LINUX_PREEMPT_H */
diff --git a/xen/include/asm-ia64/linux/seqlock.h b/xen/include/asm-ia64/linux/seqlock.h
deleted file mode 100644
index fca9b0fb5b..0000000000
--- a/xen/include/asm-ia64/linux/seqlock.h
+++ /dev/null
@@ -1,175 +0,0 @@
-#ifndef __LINUX_SEQLOCK_H
-#define __LINUX_SEQLOCK_H
-/*
- * Reader/writer consistent mechanism without starving writers. This type of
- * lock is for data where the reader wants a consistent set of information
- * and is willing to retry if the information changes. Readers never
- * block but they may have to retry if a writer is in
- * progress. Writers do not wait for readers.
- *
- * This is not as cache friendly as brlock. Also, this will not work
- * for data that contains pointers, because any writer could
- * invalidate a pointer that a reader was following.
- *
- * Expected reader usage:
- * do {
- * seq = read_seqbegin(&foo);
- * ...
- * } while (read_seqretry(&foo, seq));
- *
- *
- * On non-SMP the spin locks disappear but the writer still needs
- * to increment the sequence variables because an interrupt routine could
- * change the state of the data.
- *
- * Based on x86_64 vsyscall gettimeofday
- * by Keith Owens and Andrea Arcangeli
- */
-
-#include <linux/config.h>
-#include <linux/spinlock.h>
-#include <linux/preempt.h>
-
-typedef struct {
- unsigned sequence;
- spinlock_t lock;
-} seqlock_t;
-
-/*
- * These macros triggered gcc-3.x compile-time problems. We think these are
- * OK now. Be cautious.
- */
-#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
-#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)
-
-
-/* Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
- */
-static inline void write_seqlock(seqlock_t *sl)
-{
- spin_lock(&sl->lock);
- ++sl->sequence;
- smp_wmb();
-}
-
-static inline void write_sequnlock(seqlock_t *sl)
-{
- smp_wmb();
- sl->sequence++;
- spin_unlock(&sl->lock);
-}
-
-static inline int write_tryseqlock(seqlock_t *sl)
-{
- int ret = spin_trylock(&sl->lock);
-
- if (ret) {
- ++sl->sequence;
- smp_wmb();
- }
- return ret;
-}
-
-/* Start of read calculation -- fetch last complete writer token */
-static inline unsigned read_seqbegin(const seqlock_t *sl)
-{
- unsigned ret = sl->sequence;
- smp_rmb();
- return ret;
-}
-
-/* Test if reader processed invalid data.
- * If the initial value is odd,
- * then writer had already started when section was entered
- * If sequence value changed
- * then writer changed data while in section
- *
- * Using xor saves one conditional branch.
- */
-static inline int read_seqretry(const seqlock_t *sl, unsigned iv)
-{
- smp_rmb();
- return (iv & 1) | (sl->sequence ^ iv);
-}
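Putting the two halves together, with the reader loop from the usage comment at the top of the file (foo_a/foo_b are an illustrative pair of fields that must be read consistently):

	static seqlock_t foo_lock = SEQLOCK_UNLOCKED;
	static unsigned long foo_a, foo_b;

	/* writer side: behaves like a normal spinlock plus the sequence bump */
	write_seqlock(&foo_lock);
	foo_a++;
	foo_b++;
	write_sequnlock(&foo_lock);

	/* reader side: retries if a writer was active or intervened */
	unsigned seq;
	unsigned long a, b;
	do {
		seq = read_seqbegin(&foo_lock);
		a = foo_a;
		b = foo_b;
	} while (read_seqretry(&foo_lock, seq));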
-
-
-/*
- * Version using sequence counter only.
- * This can be used when code has its own mutex protecting the
- * update, starting before the write_seqcount_begin() and ending
- * after the write_seqcount_end().
- */
-
-typedef struct seqcount {
- unsigned sequence;
-} seqcount_t;
-
-#define SEQCNT_ZERO { 0 }
-#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
-
-/* Start of read using pointer to a sequence counter only. */
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
-{
- unsigned ret = s->sequence;
- smp_rmb();
- return ret;
-}
-
-/* Test if reader processed invalid data.
- * Equivalent to: iv is odd or sequence number has changed.
- * (iv & 1) || (*s != iv)
- * Using xor saves one conditional branch.
- */
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
-{
- smp_rmb();
- return (iv & 1) | (s->sequence ^ iv);
-}
-
-
-/*
- * Sequence counter only version assumes that callers are using their
- * own mutexing.
- */
-static inline void write_seqcount_begin(seqcount_t *s)
-{
- s->sequence++;
- smp_wmb();
-}
-
-static inline void write_seqcount_end(seqcount_t *s)
-{
- smp_wmb();
- s->sequence++;
-}
-
-/*
- * Possible sw/hw IRQ protected versions of the interfaces.
- */
-#define write_seqlock_irqsave(lock, flags) \
- do { local_irq_save(flags); write_seqlock(lock); } while (0)
-#define write_seqlock_irq(lock) \
- do { local_irq_disable(); write_seqlock(lock); } while (0)
-#define write_seqlock_bh(lock) \
- do { local_bh_disable(); write_seqlock(lock); } while (0)
-
-#define write_sequnlock_irqrestore(lock, flags) \
- do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
-#define write_sequnlock_irq(lock) \
- do { write_sequnlock(lock); local_irq_enable(); } while(0)
-#define write_sequnlock_bh(lock) \
- do { write_sequnlock(lock); local_bh_enable(); } while(0)
-
-#define read_seqbegin_irqsave(lock, flags) \
- ({ local_irq_save(flags); read_seqbegin(lock); })
-
-#define read_seqretry_irqrestore(lock, iv, flags) \
- ({ \
- int ret = read_seqretry(lock, iv); \
- local_irq_restore(flags); \
- ret; \
- })
-
-#endif /* __LINUX_SEQLOCK_H */
diff --git a/xen/include/asm-ia64/linux/stddef.h b/xen/include/asm-ia64/linux/stddef.h
deleted file mode 100644
index b3a2cadf90..0000000000
--- a/xen/include/asm-ia64/linux/stddef.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _LINUX_STDDEF_H
-#define _LINUX_STDDEF_H
-
-#include <linux/compiler.h>
-
-#undef NULL
-#if defined(__cplusplus)
-#define NULL 0
-#else
-#define NULL ((void *)0)
-#endif
-
-#undef offsetof
-#ifdef __compiler_offsetof
-#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
-#else
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-#endif
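For instance, the fallback form computes a member's byte offset by taking the member's address within an object "placed" at address zero:

	struct point { int x; int y; };

	size_t off = offsetof(struct point, y);	/* equals sizeof(int) here */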
-
-#endif
diff --git a/xen/include/asm-ia64/linux/sysfs.h b/xen/include/asm-ia64/linux/sysfs.h
deleted file mode 100644
index 6d5c43d31d..0000000000
--- a/xen/include/asm-ia64/linux/sysfs.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * sysfs.h - definitions for the device driver filesystem
- *
- * Copyright (c) 2001,2002 Patrick Mochel
- * Copyright (c) 2004 Silicon Graphics, Inc.
- *
- * Please see Documentation/filesystems/sysfs.txt for more information.
- */
-
-#ifndef _SYSFS_H_
-#define _SYSFS_H_
-
-#include <linux/compiler.h>
-#include <asm/atomic.h>
-
-struct kobject;
-struct module;
-
-struct attribute {
- const char * name;
- struct module * owner;
- mode_t mode;
-};
-
-struct attribute_group {
- const char * name;
- struct attribute ** attrs;
-};
-
-
-
-/**
- * Use these macros to make defining attributes easier. See include/linux/device.h
- * for examples.
- */
-
-#define __ATTR(_name,_mode,_show,_store) { \
- .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \
- .show = _show, \
- .store = _store, \
-}
-
-#define __ATTR_RO(_name) { \
- .attr = { .name = __stringify(_name), .mode = 0444, .owner = THIS_MODULE }, \
- .show = _name##_show, \
-}
-
-#define __ATTR_NULL { .attr = { .name = NULL } }
-
-#define attr_name(_attr) (_attr).attr.name
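A sketch of the intended use; each subsystem wraps struct attribute in its own type and dispatches through its sysfs_ops, so the wrapper type and show signature below are illustrative rather than exact:

	static ssize_t version_show(struct kobject *kobj, char *buf)
	{
		return sprintf(buf, "1.0\n");	/* hypothetical attribute body */
	}

	/* sets .attr = { .name = "version", .mode = 0444, ... }, .show = version_show */
	static struct kobj_attribute version_attr = __ATTR(version, 0444, version_show, NULL);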
-
-struct vm_area_struct;
-
-struct bin_attribute {
- struct attribute attr;
- size_t size;
- void *private;
- ssize_t (*read)(struct kobject *, char *, loff_t, size_t);
- ssize_t (*write)(struct kobject *, char *, loff_t, size_t);
- int (*mmap)(struct kobject *, struct bin_attribute *attr,
- struct vm_area_struct *vma);
-};
-
-struct sysfs_ops {
- ssize_t (*show)(struct kobject *, struct attribute *,char *);
- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
-};
-
-struct sysfs_dirent {
- atomic_t s_count;
- struct list_head s_sibling;
- struct list_head s_children;
- void * s_element;
- int s_type;
- umode_t s_mode;
- struct dentry * s_dentry;
- struct iattr * s_iattr;
- atomic_t s_event;
-};
-
-#define SYSFS_ROOT 0x0001
-#define SYSFS_DIR 0x0002
-#define SYSFS_KOBJ_ATTR 0x0004
-#define SYSFS_KOBJ_BIN_ATTR 0x0008
-#define SYSFS_KOBJ_LINK 0x0020
-#define SYSFS_NOT_PINNED (SYSFS_KOBJ_ATTR | SYSFS_KOBJ_BIN_ATTR | SYSFS_KOBJ_LINK)
-
-#ifdef CONFIG_SYSFS
-
-extern int __must_check
-sysfs_create_dir(struct kobject *);
-
-extern void
-sysfs_remove_dir(struct kobject *);
-
-extern int __must_check
-sysfs_rename_dir(struct kobject *, const char *new_name);
-
-extern int __must_check
-sysfs_create_file(struct kobject *, const struct attribute *);
-
-extern int __must_check
-sysfs_update_file(struct kobject *, const struct attribute *);
-
-extern int __must_check
-sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode);
-
-extern void
-sysfs_remove_file(struct kobject *, const struct attribute *);
-
-extern int __must_check
-sysfs_create_link(struct kobject * kobj, struct kobject * target, const char * name);
-
-extern void
-sysfs_remove_link(struct kobject *, const char * name);
-
-int __must_check sysfs_create_bin_file(struct kobject *kobj,
- struct bin_attribute *attr);
-void sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr);
-
-int __must_check sysfs_create_group(struct kobject *,
- const struct attribute_group *);
-void sysfs_remove_group(struct kobject *, const struct attribute_group *);
-void sysfs_notify(struct kobject * k, char *dir, char *attr);
-
-extern int __must_check sysfs_init(void);
-
-#else /* CONFIG_SYSFS */
-
-static inline int sysfs_create_dir(struct kobject * k)
-{
- return 0;
-}
-
-static inline void sysfs_remove_dir(struct kobject * k)
-{
- ;
-}
-
-static inline int sysfs_rename_dir(struct kobject * k, const char *new_name)
-{
- return 0;
-}
-
-static inline int sysfs_create_file(struct kobject * k, const struct attribute * a)
-{
- return 0;
-}
-
-static inline int sysfs_update_file(struct kobject * k, const struct attribute * a)
-{
- return 0;
-}
-static inline int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
-{
- return 0;
-}
-
-static inline void sysfs_remove_file(struct kobject * k, const struct attribute * a)
-{
- ;
-}
-
-static inline int sysfs_create_link(struct kobject * k, struct kobject * t, const char * n)
-{
- return 0;
-}
-
-static inline void sysfs_remove_link(struct kobject * k, const char * name)
-{
- ;
-}
-
-
-static inline int sysfs_create_bin_file(struct kobject * k, struct bin_attribute * a)
-{
- return 0;
-}
-
-static inline int sysfs_remove_bin_file(struct kobject * k, struct bin_attribute * a)
-{
- return 0;
-}
-
-static inline int sysfs_create_group(struct kobject * k, const struct attribute_group *g)
-{
- return 0;
-}
-
-static inline void sysfs_remove_group(struct kobject * k, const struct attribute_group * g)
-{
- ;
-}
-
-static inline void sysfs_notify(struct kobject * k, char *dir, char *attr)
-{
-}
-
-static inline int __must_check sysfs_init(void)
-{
- return 0;
-}
-
-#endif /* CONFIG_SYSFS */
-
-#endif /* _SYSFS_H_ */
diff --git a/xen/include/asm-ia64/linux/thread_info.h b/xen/include/asm-ia64/linux/thread_info.h
deleted file mode 100644
index d252f45a0f..0000000000
--- a/xen/include/asm-ia64/linux/thread_info.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* thread_info.h: common low-level thread information accessors
- *
- * Copyright (C) 2002 David Howells (dhowells@redhat.com)
- * - Incorporating suggestions made by Linus Torvalds
- */
-
-#ifndef _LINUX_THREAD_INFO_H
-#define _LINUX_THREAD_INFO_H
-
-/*
- * System call restart block.
- */
-struct restart_block {
- long (*fn)(struct restart_block *);
- unsigned long arg0, arg1, arg2, arg3;
-};
-
-extern long do_no_restart_syscall(struct restart_block *parm);
-
-#include <linux/bitops.h>
-#include <asm/thread_info.h>
-
-#ifdef __KERNEL__
-
-/*
- * flag set/clear/test wrappers
- * - pass TIF_xxxx constants to these functions
- */
-
-static inline void set_thread_flag(int flag)
-{
- set_bit(flag,&current_thread_info()->flags);
-}
-
-static inline void clear_thread_flag(int flag)
-{
- clear_bit(flag,&current_thread_info()->flags);
-}
-
-static inline int test_and_set_thread_flag(int flag)
-{
- return test_and_set_bit(flag,&current_thread_info()->flags);
-}
-
-static inline int test_and_clear_thread_flag(int flag)
-{
- return test_and_clear_bit(flag,&current_thread_info()->flags);
-}
-
-static inline int test_thread_flag(int flag)
-{
- return test_bit(flag,&current_thread_info()->flags);
-}
-
-static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
-{
- set_bit(flag,&ti->flags);
-}
-
-static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
-{
- clear_bit(flag,&ti->flags);
-}
-
-static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
-{
- return test_and_set_bit(flag,&ti->flags);
-}
-
-static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
-{
- return test_and_clear_bit(flag,&ti->flags);
-}
-
-static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
-{
- return test_bit(flag,&ti->flags);
-}
-
-static inline void set_need_resched(void)
-{
- set_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline void clear_need_resched(void)
-{
- clear_thread_flag(TIF_NEED_RESCHED);
-}
-
-#endif
-
-#endif /* _LINUX_THREAD_INFO_H */
diff --git a/xen/include/asm-ia64/linux/time.h b/xen/include/asm-ia64/linux/time.h
deleted file mode 100644
index 5634497ff5..0000000000
--- a/xen/include/asm-ia64/linux/time.h
+++ /dev/null
@@ -1,181 +0,0 @@
-#ifndef _LINUX_TIME_H
-#define _LINUX_TIME_H
-
-#include <linux/types.h>
-
-#ifdef __KERNEL__
-#include <linux/seqlock.h>
-#endif
-
-#ifndef _STRUCT_TIMESPEC
-#define _STRUCT_TIMESPEC
-struct timespec {
- time_t tv_sec; /* seconds */
- long tv_nsec; /* nanoseconds */
-};
-#endif /* _STRUCT_TIMESPEC */
-
-struct timeval {
- time_t tv_sec; /* seconds */
- suseconds_t tv_usec; /* microseconds */
-};
-
-struct timezone {
- int tz_minuteswest; /* minutes west of Greenwich */
- int tz_dsttime; /* type of dst correction */
-};
-
-#ifdef __KERNEL__
-
-/* Parameters used to convert the timespec values */
-#ifndef USEC_PER_SEC
-#define USEC_PER_SEC (1000000L)
-#endif
-
-#ifndef NSEC_PER_SEC
-#define NSEC_PER_SEC (1000000000L)
-#endif
-
-#ifndef NSEC_PER_USEC
-#define NSEC_PER_USEC (1000L)
-#endif
-
-static __inline__ int timespec_equal(struct timespec *a, struct timespec *b)
-{
- return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
-}
-
-/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
- * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
- * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
- *
- * [For the Julian calendar (which was used in Russia before 1917,
- * Britain & colonies before 1752, anywhere else before 1582,
- * and is still in use by some communities) leave out the
- * -year/100+year/400 terms, and add 10.]
- *
- * This algorithm was first published by Gauss (I think).
- *
- * WARNING: this function will overflow on 2106-02-07 06:28:16 on
- * machines where long is 32-bit! (However, as time_t is signed, we
- * will already get problems at other places on 2038-01-19 03:14:08)
- */
-static inline unsigned long
-mktime (unsigned int year, unsigned int mon,
- unsigned int day, unsigned int hour,
- unsigned int min, unsigned int sec)
-{
- if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
- mon += 12; /* Puts Feb last since it has leap day */
- year -= 1;
- }
-
- return (((
- (unsigned long) (year/4 - year/100 + year/400 + 367*mon/12 + day) +
- year*365 - 719499
- )*24 + hour /* now have hours */
- )*60 + min /* now have minutes */
- )*60 + sec; /* finally seconds */
-}
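A quick sanity check of the formula at the epoch itself: mktime(1970, 1, 1, 0, 0, 0) first rewrites mon=1 as mon=11, year=1969 (February is pushed to the end of the year), and the day count becomes 1969/4 - 1969/100 + 1969/400 + 367*11/12 + 1 + 1969*365 - 719499 = 492 - 19 + 4 + 336 + 1 + 718685 - 719499 = 0, so the result is 0 seconds, as expected.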
-
-extern struct timespec xtime;
-extern struct timespec wall_to_monotonic;
-extern seqlock_t xtime_lock;
-
-static inline unsigned long get_seconds(void)
-{
- return xtime.tv_sec;
-}
-
-struct timespec current_kernel_time(void);
-
-#define CURRENT_TIME (current_kernel_time())
-#define CURRENT_TIME_SEC ((struct timespec) { xtime.tv_sec, 0 })
-
-extern void do_gettimeofday(struct timeval *tv);
-extern int do_settimeofday(struct timespec *tv);
-extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
-extern void clock_was_set(void); // call whenever the clock is set
-extern int do_posix_clock_monotonic_gettime(struct timespec *tp);
-extern long do_nanosleep(struct timespec *t);
-extern long do_utimes(char __user * filename, struct timeval * times);
-struct itimerval;
-extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue);
-extern int do_getitimer(int which, struct itimerval *value);
-extern void getnstimeofday (struct timespec *tv);
-
-extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
-
-static inline void
-set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
-{
-	while (nsec >= NSEC_PER_SEC) {
- nsec -= NSEC_PER_SEC;
- ++sec;
- }
- while (nsec < 0) {
- nsec += NSEC_PER_SEC;
- --sec;
- }
- ts->tv_sec = sec;
- ts->tv_nsec = nsec;
-}
-
-#endif /* __KERNEL__ */
-
-#define NFDBITS __NFDBITS
-
-#define FD_SETSIZE __FD_SETSIZE
-#define FD_SET(fd,fdsetp) __FD_SET(fd,fdsetp)
-#define FD_CLR(fd,fdsetp) __FD_CLR(fd,fdsetp)
-#define FD_ISSET(fd,fdsetp) __FD_ISSET(fd,fdsetp)
-#define FD_ZERO(fdsetp) __FD_ZERO(fdsetp)
-
-/*
- * Names of the interval timers, and structure
- * defining a timer setting.
- */
-#define ITIMER_REAL 0
-#define ITIMER_VIRTUAL 1
-#define ITIMER_PROF 2
-
-struct itimerspec {
- struct timespec it_interval; /* timer period */
- struct timespec it_value; /* timer expiration */
-};
-
-struct itimerval {
- struct timeval it_interval; /* timer interval */
- struct timeval it_value; /* current value */
-};
-
-
-/*
- * The IDs of the various system clocks (for POSIX.1b interval timers).
- */
-#define CLOCK_REALTIME 0
-#define CLOCK_MONOTONIC 1
-#define CLOCK_PROCESS_CPUTIME_ID 2
-#define CLOCK_THREAD_CPUTIME_ID 3
-#define CLOCK_REALTIME_HR 4
-#define CLOCK_MONOTONIC_HR 5
-
-/*
- * The IDs of various hardware clocks
- */
-
-
-#define CLOCK_SGI_CYCLE 10
-#define MAX_CLOCKS 16
-#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC | \
- CLOCK_REALTIME_HR | CLOCK_MONOTONIC_HR)
-#define CLOCKS_MONO (CLOCK_MONOTONIC & CLOCK_MONOTONIC_HR)
-
-/*
- * The various flags for setting POSIX.1b interval timers.
- */
-
-#define TIMER_ABSTIME 0x01
-
-
-#endif
diff --git a/xen/include/asm-ia64/linux/timex.h b/xen/include/asm-ia64/linux/timex.h
deleted file mode 100644
index 74fdd07d37..0000000000
--- a/xen/include/asm-ia64/linux/timex.h
+++ /dev/null
@@ -1,320 +0,0 @@
-/*****************************************************************************
- * *
- * Copyright (c) David L. Mills 1993 *
- * *
- * Permission to use, copy, modify, and distribute this software and its *
- * documentation for any purpose and without fee is hereby granted, provided *
- * that the above copyright notice appears in all copies and that both the *
- * copyright notice and this permission notice appear in supporting *
- * documentation, and that the name University of Delaware not be used in *
- * advertising or publicity pertaining to distribution of the software *
- * without specific, written prior permission. The University of Delaware *
- * makes no representations about the suitability this software for any *
- * purpose. It is provided "as is" without express or implied warranty. *
- * *
- *****************************************************************************/
-
-/*
- * Modification history timex.h
- *
- * 29 Dec 97 Russell King
- * Moved CLOCK_TICK_RATE, CLOCK_TICK_FACTOR and FINETUNE to asm/timex.h
- * for ARM machines
- *
- * 9 Jan 97 Adrian Sun
- * Shifted LATCH define to allow access to alpha machines.
- *
- * 26 Sep 94 David L. Mills
- * Added defines for hybrid phase/frequency-lock loop.
- *
- * 19 Mar 94 David L. Mills
- * Moved defines from kernel routines to header file and added new
- * defines for PPS phase-lock loop.
- *
- * 20 Feb 94 David L. Mills
- * Revised status codes and structures for external clock and PPS
- * signal discipline.
- *
- * 28 Nov 93 David L. Mills
- * Adjusted parameters to improve stability and increase poll
- * interval.
- *
- * 17 Sep 93 David L. Mills
- * Created file $NTP/include/sys/timex.h
- * 07 Oct 93 Torsten Duwe
- * Derived linux/timex.h
- * 1995-08-13 Torsten Duwe
- * kernel PLL updated to 1994-12-13 specs (rfc-1589)
- * 1997-08-30 Ulrich Windl
- * Added new constant NTP_PHASE_LIMIT
- * 2004-08-12 Christoph Lameter
- * Reworked time interpolation logic
- */
-#ifndef _LINUX_TIMEX_H
-#define _LINUX_TIMEX_H
-
-#include <linux/config.h>
-#include <linux/compiler.h>
-#include <linux/time.h>
-
-#include <asm/param.h>
-#include <asm/timex.h>
-
-/*
- * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
- * for a slightly underdamped convergence characteristic. SHIFT_KH
- * establishes the damping of the FLL and is chosen by wisdom and black
- * art.
- *
- * MAXTC establishes the maximum time constant of the PLL. With the
- * SHIFT_KG and SHIFT_KF values given and a time constant range from
- * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
- * respectively.
- */
-#define SHIFT_KG 6 /* phase factor (shift) */
-#define SHIFT_KF 16 /* PLL frequency factor (shift) */
-#define SHIFT_KH 2 /* FLL frequency factor (shift) */
-#define MAXTC 6 /* maximum time constant (shift) */
-
-/*
- * The SHIFT_SCALE define establishes the decimal point of the time_phase
- * variable which serves as an extension to the low-order bits of the
- * system clock variable. The SHIFT_UPDATE define establishes the decimal
- * point of the time_offset variable which represents the current offset
- * with respect to standard time. The FINENSEC define represents 1 nsec in
- * scaled units.
- *
- * SHIFT_USEC defines the scaling (shift) of the time_freq and
- * time_tolerance variables, which represent the current frequency
- * offset and maximum frequency tolerance.
- *
- * FINENSEC is 1 ns in SHIFT_UPDATE units of the time_phase variable.
- */
-#define SHIFT_SCALE 22 /* phase scale (shift) */
-#define SHIFT_UPDATE (SHIFT_KG + MAXTC) /* time offset scale (shift) */
-#define SHIFT_USEC 16 /* frequency offset scale (shift) */
-#define FINENSEC (1L << (SHIFT_SCALE - 10)) /* ~1 ns in phase units */
-
-#define MAXPHASE 512000L /* max phase error (us) */
-#define MAXFREQ (512L << SHIFT_USEC) /* max frequency error (ppm) */
-#define MAXTIME (200L << PPS_AVG) /* max PPS error (jitter) (200 us) */
-#define MINSEC 16L /* min interval between updates (s) */
-#define MAXSEC 1200L /* max interval between updates (s) */
-#define NTP_PHASE_LIMIT (MAXPHASE << 5) /* beyond max. dispersion */
-
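/* A small illustrative helper (not part of the original header): time_freq,
 * time_tolerance and MAXFREQ are kept as ppm scaled left by SHIFT_USEC, so
 * whole ppm are recovered with a plain shift: */
static inline long scaled_ppm_to_ppm(long scaled_ppm)
{
	return scaled_ppm >> SHIFT_USEC;	/* e.g. MAXFREQ >> SHIFT_USEC == 512 */
}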
-/*
- * The following defines are used only if a pulse-per-second (PPS)
- * signal is available and connected via a modem control lead, such as
- * produced by the optional ppsclock feature incorporated in the Sun
- * asynch driver. They establish the design parameters of the frequency-
- * lock loop used to discipline the CPU clock oscillator to the PPS
- * signal.
- *
- * PPS_AVG is the averaging factor for the frequency loop, as well as
- * the time and frequency dispersion.
- *
- * PPS_SHIFT and PPS_SHIFTMAX specify the minimum and maximum
- * calibration intervals, respectively, in seconds as a power of two.
- *
- * PPS_VALID is the maximum interval before the PPS signal is considered
- * invalid and protocol updates used directly instead.
- *
- * MAXGLITCH is the maximum interval before a time offset of more than
- * MAXTIME is believed.
- */
-#define PPS_AVG 2 /* pps averaging constant (shift) */
-#define PPS_SHIFT 2 /* min interval duration (s) (shift) */
-#define PPS_SHIFTMAX 8 /* max interval duration (s) (shift) */
-#define PPS_VALID 120 /* pps signal watchdog max (s) */
-#define MAXGLITCH 30 /* pps signal glitch max (s) */
-
-/*
- * syscall interface - used (mainly by the NTP daemon)
- * to discipline the kernel clock oscillator
- */
-struct timex {
- unsigned int modes; /* mode selector */
- long offset; /* time offset (usec) */
- long freq; /* frequency offset (scaled ppm) */
- long maxerror; /* maximum error (usec) */
- long esterror; /* estimated error (usec) */
- int status; /* clock command/status */
- long constant; /* pll time constant */
- long precision; /* clock precision (usec) (read only) */
- long tolerance; /* clock frequency tolerance (ppm)
- * (read only)
- */
- struct timeval time; /* (read only) */
- long tick; /* (modified) usecs between clock ticks */
-
- long ppsfreq; /* pps frequency (scaled ppm) (ro) */
- long jitter; /* pps jitter (us) (ro) */
- int shift; /* interval duration (s) (shift) (ro) */
- long stabil; /* pps stability (scaled ppm) (ro) */
- long jitcnt; /* jitter limit exceeded (ro) */
- long calcnt; /* calibration intervals (ro) */
- long errcnt; /* calibration errors (ro) */
- long stbcnt; /* stability limit exceeded (ro) */
-
- int :32; int :32; int :32; int :32;
- int :32; int :32; int :32; int :32;
- int :32; int :32; int :32; int :32;
-};
-
-/*
- * Mode codes (timex.modes)
- */
-#define ADJ_OFFSET 0x0001 /* time offset */
-#define ADJ_FREQUENCY 0x0002 /* frequency offset */
-#define ADJ_MAXERROR 0x0004 /* maximum time error */
-#define ADJ_ESTERROR 0x0008 /* estimated time error */
-#define ADJ_STATUS 0x0010 /* clock status */
-#define ADJ_TIMECONST 0x0020 /* pll time constant */
-#define ADJ_TICK 0x4000 /* tick value */
-#define ADJ_OFFSET_SINGLESHOT 0x8001 /* old-fashioned adjtime */
-
-/* xntp 3.4 compatibility names */
-#define MOD_OFFSET ADJ_OFFSET
-#define MOD_FREQUENCY ADJ_FREQUENCY
-#define MOD_MAXERROR ADJ_MAXERROR
-#define MOD_ESTERROR ADJ_ESTERROR
-#define MOD_STATUS ADJ_STATUS
-#define MOD_TIMECONST ADJ_TIMECONST
-#define MOD_CLKB ADJ_TICK
-#define MOD_CLKA ADJ_OFFSET_SINGLESHOT /* 0x8000 in original */
-
-
-/*
- * Status codes (timex.status)
- */
-#define STA_PLL 0x0001 /* enable PLL updates (rw) */
-#define STA_PPSFREQ 0x0002 /* enable PPS freq discipline (rw) */
-#define STA_PPSTIME 0x0004 /* enable PPS time discipline (rw) */
-#define STA_FLL 0x0008 /* select frequency-lock mode (rw) */
-
-#define STA_INS 0x0010 /* insert leap (rw) */
-#define STA_DEL 0x0020 /* delete leap (rw) */
-#define STA_UNSYNC 0x0040 /* clock unsynchronized (rw) */
-#define STA_FREQHOLD 0x0080 /* hold frequency (rw) */
-
-#define STA_PPSSIGNAL 0x0100 /* PPS signal present (ro) */
-#define STA_PPSJITTER 0x0200 /* PPS signal jitter exceeded (ro) */
-#define STA_PPSWANDER 0x0400 /* PPS signal wander exceeded (ro) */
-#define STA_PPSERROR 0x0800 /* PPS signal calibration error (ro) */
-
-#define STA_CLOCKERR 0x1000 /* clock hardware fault (ro) */
-
-#define STA_RONLY (STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | \
- STA_PPSERROR | STA_CLOCKERR) /* read-only bits */
-
-/*
- * Clock states (time_state)
- */
-#define TIME_OK 0 /* clock synchronized, no leap second */
-#define TIME_INS 1 /* insert leap second */
-#define TIME_DEL 2 /* delete leap second */
-#define TIME_OOP 3 /* leap second in progress */
-#define TIME_WAIT 4 /* leap second has occurred */
-#define TIME_ERROR 5 /* clock not synchronized */
-#define TIME_BAD TIME_ERROR /* bw compat */
-
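/* A minimal userspace sketch (not part of the original header) showing how
 * the ADJ_* mode bits, STA_* status bits and TIME_* clock states above fit
 * together; it assumes glibc's adjtimex(2) and <sys/timex.h>, which mirror
 * these definitions: */
#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* no ADJ_* bits set: read-only query */
	int state = adjtimex(&tx);		/* returns one of the TIME_* states */

	if (state == TIME_ERROR || (tx.status & STA_UNSYNC))
		printf("clock not synchronized\n");
	else
		printf("state %d, freq %ld (scaled ppm), offset %ld us\n",
		       state, tx.freq, tx.offset);
	return 0;
}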
-#ifdef __KERNEL__
-/*
- * kernel variables
- * Note: maximum error = NTP synch distance = dispersion + delay / 2;
- * estimated error = NTP dispersion.
- */
-extern unsigned long tick_usec; /* USER_HZ period (usec) */
-extern unsigned long tick_nsec; /* ACTHZ period (nsec) */
-extern int tickadj; /* amount of adjustment per tick */
-
-/*
- * phase-lock loop variables
- */
-extern int time_state; /* clock status */
-extern int time_status; /* clock synchronization status bits */
-extern long time_offset; /* time adjustment (us) */
-extern long time_constant; /* pll time constant */
-extern long time_tolerance; /* frequency tolerance (ppm) */
-extern long time_precision; /* clock precision (us) */
-extern long time_maxerror; /* maximum error */
-extern long time_esterror; /* estimated error */
-
-extern long time_freq; /* frequency offset (scaled ppm) */
-extern long time_reftime; /* time at last adjustment (s) */
-
-extern long time_adjust; /* The amount of adjtime left */
-extern long time_next_adjust; /* Value for time_adjust at next tick */
-
-/* interface variables pps->timer interrupt */
-extern long pps_offset; /* pps time offset (us) */
-extern long pps_jitter; /* time dispersion (jitter) (us) */
-extern long pps_freq; /* frequency offset (scaled ppm) */
-extern long pps_stabil; /* frequency dispersion (scaled ppm) */
-extern long pps_valid; /* pps signal watchdog counter */
-
-/* interface variables pps->adjtimex */
-extern int pps_shift; /* interval duration (s) (shift) */
-extern long pps_jitcnt; /* jitter limit exceeded */
-extern long pps_calcnt; /* calibration intervals */
-extern long pps_errcnt; /* calibration errors */
-extern long pps_stbcnt; /* stability limit exceeded */
-
-#ifdef CONFIG_TIME_INTERPOLATION
-
-#define TIME_SOURCE_CPU 0
-#define TIME_SOURCE_MMIO64 1
-#define TIME_SOURCE_MMIO32 2
-#define TIME_SOURCE_FUNCTION 3
-
-/* For proper operation, time_interpolator clocks must run slightly slower
- * than the standard clock since the interpolator may only correct by having
- * time jump forward during a tick. A slower clock is usually a side effect
- * of the integer divide of the nanoseconds in a second by the frequency.
- * The accuracy of the division can be increased by specifying a shift.
- * However, this may cause the clock not to be slow enough.
- * The interpolator will self-tune the clock by slowing down if no
- * resets occur or speeding up if the time jumps per analysis cycle
- * become too high.
- *
- * Setting jitter compensates for a fluctuating timesource by comparing
- * to the last value read from the timesource to ensure that an earlier value
- * is not returned by a later call. The price to pay
- * for the compensation is that the timer routines are not as scalable anymore.
- */
-
-struct time_interpolator {
- u16 source; /* time source flags */
- u8 shift; /* increases accuracy of multiply by shifting. */
- /* Note that bits may be lost if shift is set too high */
- u8 jitter; /* if set compensate for fluctuations */
- u32 nsec_per_cyc; /* set by register_time_interpolator() */
- void *addr; /* address of counter or function */
- u64 mask; /* mask the valid bits of the counter */
- unsigned long offset; /* nsec offset at last update of interpolator */
- u64 last_counter; /* counter value in units of the counter at last update */
- u64 last_cycle; /* Last timer value if TIME_SOURCE_JITTER is set */
- u64 frequency; /* frequency in counts/second */
- long drift; /* drift in parts-per-million (or -1) */
- unsigned long skips; /* skips forward */
- unsigned long ns_skipped; /* nanoseconds skipped */
- struct time_interpolator *next;
-};
-
-extern void register_time_interpolator(struct time_interpolator *);
-extern void unregister_time_interpolator(struct time_interpolator *);
-extern void time_interpolator_reset(void);
-extern unsigned long time_interpolator_get_offset(void);
-
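/* A hedged sketch (all "my_" names and numbers are made up) of how platform
 * code might register a 64-bit memory-mapped counter with the interpolator
 * machinery above; nsec_per_cyc is computed by register_time_interpolator()
 * from .frequency and .shift: */
static struct time_interpolator my_mmio_interpolator = {
	.source    = TIME_SOURCE_MMIO64,
	.shift     = 16,			/* extra precision for the nsec multiply */
	.addr      = (void *)0xf0001000UL,	/* hypothetical counter address */
	.mask      = ~0UL,			/* all 64 counter bits are valid */
	.frequency = 400000000UL,		/* hypothetical 400 MHz counter */
};

static void __init my_platform_time_init(void)
{
	register_time_interpolator(&my_mmio_interpolator);
}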
-#else /* !CONFIG_TIME_INTERPOLATION */
-
-static inline void
-time_interpolator_reset(void)
-{
-}
-
-#endif /* !CONFIG_TIME_INTERPOLATION */
-
-#endif /* KERNEL */
-
-#endif /* LINUX_TIMEX_H */
diff --git a/xen/include/asm-ia64/linux/topology.h b/xen/include/asm-ia64/linux/topology.h
deleted file mode 100644
index 713ce88360..0000000000
--- a/xen/include/asm-ia64/linux/topology.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * include/linux/topology.h
- *
- * Written by: Matthew Dobson, IBM Corporation
- *
- * Copyright (C) 2002, IBM Corp.
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send feedback to <colpatch@us.ibm.com>
- */
-#ifndef _LINUX_TOPOLOGY_H
-#define _LINUX_TOPOLOGY_H
-
-#include <linux/cpumask.h>
-#include <linux/bitops.h>
-#include <linux/mmzone.h>
-#include <linux/smp.h>
-#include <asm/topology.h>
-
-#ifndef node_has_online_mem
-#define node_has_online_mem(nid) (1)
-#endif
-
-#ifndef nr_cpus_node
-#define nr_cpus_node(node) \
- ({ \
- cpumask_t __tmp__; \
- __tmp__ = node_to_cpumask(node); \
- cpumask_weight(&__tmp__); \
- })
-#endif
-
-#define for_each_node_with_cpus(node) \
- for_each_online_node(node) \
- if (nr_cpus_node(node))
-
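/* An illustrative walk (sketch, the "my_" name is made up) over the helpers
 * above: total up the CPUs on every node that actually has some: */
static inline int my_online_cpu_count(void)
{
	int node, total = 0;

	for_each_node_with_cpus(node)
		total += nr_cpus_node(node);
	return total;
}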
-#ifndef node_distance
-/* Conform to ACPI 2.0 SLIT distance definitions */
-#define LOCAL_DISTANCE 10
-#define REMOTE_DISTANCE 20
-#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
-#endif
-#ifndef PENALTY_FOR_NODE_WITH_CPUS
-#define PENALTY_FOR_NODE_WITH_CPUS (1)
-#endif
-
-/*
- * Below are the 3 major initializers used in building sched_domains:
- * SD_SIBLING_INIT, for SMT domains
- * SD_CPU_INIT, for SMP domains
- * SD_NODE_INIT, for NUMA domains
- *
- * Any architecture that cares to do any tuning to these values should do so
- * by defining their own arch-specific initializer in include/asm/topology.h.
- * A definition there will automagically override these default initializers
- * and allow arch-specific performance tuning of sched_domains.
- */
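/* An illustrative override (sketch): an architecture's include/asm/topology.h
 * might carry something like the following, which then takes precedence over
 * the generic SD_CPU_INIT default below; the field values here are invented
 * for the example: */
#define SD_CPU_INIT (struct sched_domain) {		\
	.span		  = CPU_MASK_NONE,		\
	.min_interval	  = 1,				\
	.max_interval	  = 8,				\
	.busy_factor	  = 32,				\
	.imbalance_pct	  = 125,			\
	.flags		  = SD_LOAD_BALANCE		\
			  | SD_BALANCE_EXEC,		\
	.last_balance	  = jiffies,			\
	.balance_interval = 1,				\
}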
-#ifdef CONFIG_SCHED_SMT
-/* MCD - Do we really need this? It is always on if CONFIG_SCHED_SMT is,
- * so can't we drop this in favor of CONFIG_SCHED_SMT?
- */
-#define ARCH_HAS_SCHED_WAKE_IDLE
-/* Common values for SMT siblings */
-#ifndef SD_SIBLING_INIT
-#define SD_SIBLING_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .groups = NULL, \
- .min_interval = 1, \
- .max_interval = 2, \
- .busy_factor = 8, \
- .imbalance_pct = 110, \
- .cache_hot_time = 0, \
- .cache_nice_tries = 0, \
- .per_cpu_gain = 25, \
- .busy_idx = 0, \
- .idle_idx = 0, \
- .newidle_idx = 1, \
- .wake_idx = 0, \
- .forkexec_idx = 0, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_NEWIDLE \
- | SD_BALANCE_EXEC \
- | SD_WAKE_AFFINE \
- | SD_WAKE_IDLE \
- | SD_SHARE_CPUPOWER, \
- .last_balance = jiffies, \
- .balance_interval = 1, \
- .nr_balance_failed = 0, \
-}
-#endif
-#endif /* CONFIG_SCHED_SMT */
-
-/* Common values for CPUs */
-#ifndef SD_CPU_INIT
-#define SD_CPU_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .groups = NULL, \
- .min_interval = 1, \
- .max_interval = 4, \
- .busy_factor = 64, \
- .imbalance_pct = 125, \
- .cache_hot_time = (5*1000000/2), \
- .cache_nice_tries = 1, \
- .per_cpu_gain = 100, \
- .busy_idx = 2, \
- .idle_idx = 1, \
- .newidle_idx = 2, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_NEWIDLE \
- | SD_BALANCE_EXEC \
- | SD_WAKE_AFFINE, \
- .last_balance = jiffies, \
- .balance_interval = 1, \
- .nr_balance_failed = 0, \
-}
-#endif
-
-#ifdef CONFIG_NUMA
-#ifndef SD_NODE_INIT
-#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
-#endif
-#endif /* CONFIG_NUMA */
-
-#endif /* _LINUX_TOPOLOGY_H */
diff --git a/xen/include/asm-ia64/linux/wait.h b/xen/include/asm-ia64/linux/wait.h
deleted file mode 100644
index d38c9fecdc..0000000000
--- a/xen/include/asm-ia64/linux/wait.h
+++ /dev/null
@@ -1,458 +0,0 @@
-#ifndef _LINUX_WAIT_H
-#define _LINUX_WAIT_H
-
-#define WNOHANG 0x00000001
-#define WUNTRACED 0x00000002
-#define WSTOPPED WUNTRACED
-#define WEXITED 0x00000004
-#define WCONTINUED 0x00000008
-#define WNOWAIT 0x01000000 /* Don't reap, just poll status. */
-
-#define __WNOTHREAD 0x20000000 /* Don't wait on children of other threads in this group */
-#define __WALL 0x40000000 /* Wait on all children, regardless of type */
-#define __WCLONE 0x80000000 /* Wait only on non-SIGCHLD children */
-
-/* First argument to waitid: */
-#define P_ALL 0
-#define P_PID 1
-#define P_PGID 2
-
-#ifdef __KERNEL__
-
-#include <linux/config.h>
-#include <linux/list.h>
-#include <linux/stddef.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-#include <asm/current.h>
-
-typedef struct __wait_queue wait_queue_t;
-typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
-int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-
-struct __wait_queue {
- unsigned int flags;
-#define WQ_FLAG_EXCLUSIVE 0x01
- void *private;
- wait_queue_func_t func;
- struct list_head task_list;
-};
-
-struct wait_bit_key {
- void *flags;
- int bit_nr;
-};
-
-struct wait_bit_queue {
- struct wait_bit_key key;
- wait_queue_t wait;
-};
-
-struct __wait_queue_head {
- spinlock_t lock;
- struct list_head task_list;
-};
-typedef struct __wait_queue_head wait_queue_head_t;
-
-
-/*
- * Macros for declaration and initialisation of the datatypes
- */
-
-#define __WAITQUEUE_INITIALIZER(name, tsk) { \
- .private = tsk, \
- .func = default_wake_function, \
- .task_list = { NULL, NULL } }
-
-#define DECLARE_WAITQUEUE(name, tsk) \
- wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
-
-#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
- .lock = SPIN_LOCK_UNLOCKED, \
- .task_list = { &(name).task_list, &(name).task_list } }
-
-#define DECLARE_WAIT_QUEUE_HEAD(name) \
- wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
-
-#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
- { .flags = word, .bit_nr = bit, }
-
-static inline void init_waitqueue_head(wait_queue_head_t *q)
-{
- spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->task_list);
-}
-
-static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
-{
- q->flags = 0;
- q->private = p;
- q->func = default_wake_function;
-}
-
-static inline void init_waitqueue_func_entry(wait_queue_t *q,
- wait_queue_func_t func)
-{
- q->flags = 0;
- q->private = NULL;
- q->func = func;
-}
-
-static inline int waitqueue_active(wait_queue_head_t *q)
-{
- return !list_empty(&q->task_list);
-}
-
-/*
- * Used to distinguish between sync and async io wait context:
- * sync i/o typically specifies a NULL wait queue entry or a wait
- * queue entry bound to a task (current task) to wake up.
- * aio specifies a wait queue entry with an async notification
- * callback routine, not associated with any task.
- */
-#define is_sync_wait(wait) (!(wait) || ((wait)->private))
-
-extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
-extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
-extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
-
-static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
-{
- list_add(&new->task_list, &head->task_list);
-}
-
-/*
- * Used for wake-one threads:
- */
-static inline void __add_wait_queue_tail(wait_queue_head_t *head,
- wait_queue_t *new)
-{
- list_add_tail(&new->task_list, &head->task_list);
-}
-
-static inline void __remove_wait_queue(wait_queue_head_t *head,
- wait_queue_t *old)
-{
- list_del(&old->task_list);
-}
-
-void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
-extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
-extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
-void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
-int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
-int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
-void FASTCALL(wake_up_bit(void *, int));
-int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
-int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
-wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
-
-#define wake_up(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_locked(x) __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
-
-#define __wait_event(wq, condition) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- schedule(); \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
-
-/**
- * wait_event - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define wait_event(wq, condition) \
-do { \
- if (condition) \
- break; \
- __wait_event(wq, condition); \
-} while (0)
-
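/* A typical (illustrative) pairing for wait_event(); my_wq and my_flag are
 * made-up names. The waker must change the condition before calling
 * wake_up(), as the comment above requires: */
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_flag;

static void my_consumer(void)
{
	wait_event(my_wq, my_flag != 0);	/* sleeps uninterruptibly until true */
}

static void my_producer(void)
{
	my_flag = 1;		/* update the condition first... */
	wake_up(&my_wq);	/* ...then wake any waiters */
}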
-#define __wait_event_timeout(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
-
-/**
- * wait_event_timeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @timeout: timeout, in jiffies
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function returns 0 if the @timeout elapsed, and the remaining
- * jiffies if the condition evaluated to true before the timeout elapsed.
- */
-#define wait_event_timeout(wq, condition, timeout) \
-({ \
- long __ret = timeout; \
- if (!(condition)) \
- __wait_event_timeout(wq, condition, __ret); \
- __ret; \
-})
-
-#define __wait_event_interruptible(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current)) { \
- schedule(); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
-
-/**
- * wait_event_interruptible - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function will return -ERESTARTSYS if it was interrupted by a
- * signal and 0 if @condition evaluated to true.
- */
-#define wait_event_interruptible(wq, condition) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __wait_event_interruptible(wq, condition, __ret); \
- __ret; \
-})
-
-#define __wait_event_interruptible_timeout(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current)) { \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
-
-/**
- * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @timeout: timeout, in jiffies
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
- * was interrupted by a signal, and the remaining jiffies otherwise
- * if the condition evaluated to true before the timeout elapsed.
- */
-#define wait_event_interruptible_timeout(wq, condition, timeout) \
-({ \
- long __ret = timeout; \
- if (!(condition)) \
- __wait_event_interruptible_timeout(wq, condition, __ret); \
- __ret; \
-})
-
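/* Illustrative return-value handling (sketch, reusing the made-up my_wq and
 * my_flag names from above) for the interruptible timeout variant: */
static long my_wait_for_done(void)
{
	long ret = wait_event_interruptible_timeout(my_wq, my_flag, HZ);

	if (ret == 0)
		return -ETIMEDOUT;	/* HZ jiffies passed, condition still false */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: a signal arrived first */
	return 0;			/* ret > 0: remaining jiffies, condition true */
}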
-#define __wait_event_interruptible_exclusive(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait_exclusive(&wq, &__wait, \
- TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current)) { \
- schedule(); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
-
-#define wait_event_interruptible_exclusive(wq, condition) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __wait_event_interruptible_exclusive(wq, condition, __ret);\
- __ret; \
-})
-
-/*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
- wait_queue_t * wait)
-{
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue_tail(q, wait);
-}
-
-/*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void remove_wait_queue_locked(wait_queue_head_t *q,
- wait_queue_t * wait)
-{
- __remove_wait_queue(q, wait);
-}
-
-/*
- * These are the old interfaces to sleep waiting for an event.
- * They are racy. DO NOT use them, use the wait_event* interfaces above.
- * We plan to remove these interfaces during 2.7.
- */
-extern void FASTCALL(sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
- signed long timeout));
-extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
- signed long timeout));
-
-/*
- * Waitqueues which are removed from the waitqueue_head at wakeup time
- */
-void FASTCALL(prepare_to_wait(wait_queue_head_t *q,
- wait_queue_t *wait, int state));
-void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
- wait_queue_t *wait, int state));
-void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-
-#define DEFINE_WAIT(name) \
- wait_queue_t name = { \
- .private = current, \
- .func = autoremove_wake_function, \
- .task_list = LIST_HEAD_INIT((name).task_list), \
- }
-
-#define DEFINE_WAIT_BIT(name, word, bit) \
- struct wait_bit_queue name = { \
- .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
- .wait = { \
- .private = current, \
- .func = wake_bit_function, \
- .task_list = \
- LIST_HEAD_INIT((name).wait.task_list), \
- }, \
- }
-
-#define init_wait(wait) \
- do { \
- (wait)->private = current; \
- (wait)->func = autoremove_wake_function; \
- INIT_LIST_HEAD(&(wait)->task_list); \
- } while (0)
-
-/**
- * wait_on_bit - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
- * @mode: the task state to sleep in
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that waits on a bit.
- * One uses wait_on_bit() in threads waiting for the bit to clear
- * when they have no intention of setting it.
- */
-static inline int wait_on_bit(void *word, int bit,
- int (*action)(void *), unsigned mode)
-{
- if (!test_bit(bit, word))
- return 0;
- return out_of_line_wait_on_bit(word, bit, action, mode);
-}
-
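/* Illustrative use (sketch) of wait_on_bit(): block until bit 0 of a flags
 * word clears; the action callback decides how to sleep, here by simply
 * scheduling (the "my_" names are made up): */
static int my_bit_action(void *word)
{
	schedule();		/* task state was set by the wait machinery */
	return 0;		/* 0 = keep waiting; nonzero would abort the wait */
}

static void my_wait_for_clear(unsigned long *flags)
{
	wait_on_bit(flags, 0, my_bit_action, TASK_UNINTERRUPTIBLE);
}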
-/**
- * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
- * @mode: the task state to sleep in
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that waits on a bit
- * when one intends to set it, for instance when locking bitflags.
- * If one were to have waiters trying to set a bitflag and waiting
- * for it to clear before setting it, one would call
- * wait_on_bit_lock() in threads waiting to be able to set the bit.
- * One uses wait_on_bit_lock() where one is waiting for the bit to
- * clear with the intention of setting it, and when done, clearing it.
- */
-static inline int wait_on_bit_lock(void *word, int bit,
- int (*action)(void *), unsigned mode)
-{
- if (!test_and_set_bit(bit, word))
- return 0;
- return out_of_line_wait_on_bit_lock(word, bit, action, mode);
-}
-
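/* An illustrative bit-lock (sketch) built on wait_on_bit_lock(), reusing the
 * made-up my_bit_action above; the unlock side must clear the bit and then
 * call wake_up_bit(), declared earlier in this file: */
static void my_bit_lock(unsigned long *flags)
{
	wait_on_bit_lock(flags, 0, my_bit_action, TASK_UNINTERRUPTIBLE);
}

static void my_bit_unlock(unsigned long *flags)
{
	clear_bit(0, flags);
	smp_mb__after_clear_bit();	/* order the clear before the wakeup */
	wake_up_bit(flags, 0);
}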
-#endif /* __KERNEL__ */
-
-#endif
diff --git a/xen/include/asm-ia64/mach_apic.h b/xen/include/asm-ia64/mach_apic.h
deleted file mode 100644
index 3f96d742a2..0000000000
--- a/xen/include/asm-ia64/mach_apic.h
+++ /dev/null
@@ -1 +0,0 @@
-/* Leave it as blank for compilation. */
diff --git a/xen/include/asm-ia64/mm.h b/xen/include/asm-ia64/mm.h
deleted file mode 100644
index d189dc14d1..0000000000
--- a/xen/include/asm-ia64/mm.h
+++ /dev/null
@@ -1,586 +0,0 @@
-/*
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- * dom0 vp model support
- */
-#ifndef __ASM_IA64_MM_H__
-#define __ASM_IA64_MM_H__
-
-#include <xen/config.h>
-#ifdef LINUX_2_6
-#include <linux/gfp.h>
-#endif
-#include <xen/list.h>
-#include <xen/spinlock.h>
-#include <xen/perfc.h>
-
-#include <asm/processor.h>
-#include <asm/atomic.h>
-#include <asm/tlbflush.h>
-#include <asm/flushtlb.h>
-#include <asm/io.h>
-
-#include <public/xen.h>
-
-/*
- * The following is for page_alloc.c.
- */
-
-typedef unsigned long page_flags_t;
-
-/*
- * Per-page-frame information.
- *
- * Every architecture must ensure the following:
- * 1. 'struct page_info' contains a 'struct list_head list'.
- * 2. A PFN_ORDER() macro is provided for accessing the order of a free page.
- */
-#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
-
-#define PRtype_info "016lx"
-
-#ifdef CONFIG_IA64_SHRINK_PAGE_LIST
-/*
- * See include/xen/mm.h.
- * To compress page_list_entry, all physical addresses must be
- * addressable with (32 + PAGE_SHIFT) bits.
- * However, this is lower than IA64_MAX_PHYS_BITS = 50.
- */
-#undef page_list_entry
-struct page_list_entry
-{
- u32 next, prev;
-};
-#endif
-
-#ifdef CONFIG_IA64_PICKLE_DOMAIN
-typedef u32 __ia64_domain_t;
-#else
-typedef unsigned long __ia64_domain_t;
-#endif
-
-struct page_info
-{
- /* Each frame can be threaded onto a doubly-linked list. */
- struct page_list_entry list;
-
- /* Reference count and various PGC_xxx flags and fields. */
- unsigned long count_info;
-
- /* Context-dependent fields follow... */
- union {
-
- /* Page is in use: ((count_info & PGC_count_mask) != 0). */
- struct {
- /* Type reference count and various PGT_xxx flags and fields. */
- unsigned long type_info;
- /* Owner of this page (NULL if page is anonymous). */
- __ia64_domain_t _domain; /* pickled format */
- } inuse;
-
- /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
- struct {
- /* Order-size of the free chunk this page is the head of. */
- u32 order;
- /* Do TLBs need flushing for safety before next page use? */
- bool_t need_tlbflush;
- } free;
-
- } u;
-
- /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
- u32 tlbflush_timestamp;
-};
-
-#define set_page_count(p,v) atomic_set(&(p)->_count, v - 1)
-
-/*
- * Only a small set of flags is defined so far on IA-64.
- * IA-64 should eventually use the same definitions as x86_64.
- */
-#define PG_shift(idx) (BITS_PER_LONG - (idx))
-#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
-
-/* The following page types are MUTUALLY EXCLUSIVE. */
-#define PGT_none PG_mask(0, 3) /* no special uses of this page */
-#define PGT_l1_page_table PG_mask(1, 3) /* using as an L1 page table? */
-#define PGT_l2_page_table PG_mask(2, 3) /* using as an L2 page table? */
-#define PGT_l3_page_table PG_mask(3, 3) /* using as an L3 page table? */
-#define PGT_l4_page_table PG_mask(4, 3) /* using as an L4 page table? */
- /* Value 5 reserved. See asm-x86/mm.h */
- /* Value 6 reserved. See asm-x86/mm.h */
-#define PGT_writable_page PG_mask(7, 3) /* has writable mappings? */
-#define PGT_type_mask PG_mask(7, 3) /* Bits 29-31. */
-
- /* Owning guest has pinned this page to its current type? */
-#define _PGT_pinned PG_shift(4)
-#define PGT_pinned PG_mask(1, 4)
- /* Has this page been validated for use as its current type? */
-#define _PGT_validated PG_shift(5)
-#define PGT_validated PG_mask(1, 5)
-
- /* Count of uses of this frame as its current type. */
-#define PGT_count_width PG_shift(7)
-#define PGT_count_mask ((1UL<<PGT_count_width)-1)
-
- /* Cleared when the owning guest 'frees' this page. */
-#define _PGC_allocated PG_shift(1)
-#define PGC_allocated PG_mask(1, 1)
- /* Page is Xen heap? */
-# define _PGC_xen_heap PG_shift(2)
-# define PGC_xen_heap PG_mask(1, 2)
- /* bit PG_shift(3) reserved. See asm-x86/mm.h */
- /* PG_mask(7, 6) reserved. See asm-x86/mm.h*/
-
- /* Page is broken? */
-#define _PGC_broken PG_shift(7)
-#define PGC_broken PG_mask(1, 7)
-
- /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
-#define PGC_state PG_mask(3, 9)
-#define PGC_state_inuse PG_mask(0, 9)
-#define PGC_state_offlining PG_mask(1, 9)
-#define PGC_state_offlined PG_mask(2, 9)
-#define PGC_state_free PG_mask(3, 9)
-#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
-
- /* Count of references to this frame. */
-#define PGC_count_width PG_shift(9)
-#define PGC_count_mask ((1UL<<PGC_count_width)-1)
-
-extern unsigned long xen_fixed_mfn_start;
-extern unsigned long xen_fixed_mfn_end;
-#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
-#define is_xen_heap_mfn(mfn) (mfn_valid(mfn) && \
- is_xen_heap_page(mfn_to_page(mfn)))
-#define is_xen_fixed_mfn(mfn) \
- (xen_fixed_mfn_start <= (mfn) && (mfn) <= xen_fixed_mfn_end)
-
-#ifdef CONFIG_IA64_PICKLE_DOMAIN
-#define page_get_owner(_p) \
- ((struct domain *)((_p)->v.inuse._domain ? \
- mfn_to_virt((_p)->v.inuse._domain) : NULL))
-#define page_set_owner(_p,_d) \
- ((_p)->v.inuse._domain = (_d) ? virt_to_mfn(_d) : 0)
-#else
-#define page_get_owner(_p) ((struct domain *)(_p)->u.inuse._domain)
-#define page_set_owner(_p, _d) ((_p)->u.inuse._domain = (unsigned long)(_d))
-#endif
-
-#define XENSHARE_writable 0
-#define XENSHARE_readonly 1
-void share_xen_page_with_guest(struct page_info *page,
- struct domain *d, int readonly);
-void share_xen_page_with_privileged_guests(struct page_info *page,
- int readonly);
-
-extern unsigned long frametable_pg_dir[];
-extern struct page_info *frame_table;
-extern unsigned long frame_table_size;
-extern struct list_head free_list;
-extern spinlock_t free_list_lock;
-extern unsigned int free_pfns;
-extern unsigned long max_page;
-
-extern void __init init_frametable(void);
-void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
-
-static inline void put_page(struct page_info *page)
-{
- unsigned long nx, x, y = page->count_info;
-
- do {
- ASSERT((y & PGC_count_mask) != 0);
- x = y;
- nx = x - 1;
- }
- while (unlikely((y = cmpxchg_rel(&page->count_info, x, nx)) != x));
-
- if (unlikely((nx & PGC_count_mask) == 0))
- free_domheap_page(page);
-}
-
-static inline struct domain *page_get_owner_and_reference(
- struct page_info *page)
-{
- unsigned long x, y = page->count_info;
-
- do {
- x = y;
- /*
- * Count == 0: Page is not allocated, so we cannot take a reference.
- * Count == -1: Reference count would wrap, which is invalid.
- * Count == -2: Remaining unused ref is reserved for get_page_light().
- */
- /*
- * On ia64, get_page_light() isn't defined, so there is no need to
- * handle Count == -2; the check is kept only for consistency with x86.
- */
- if ( unlikely(((x + 2) & PGC_count_mask) <= 2) )
- return NULL;
- y = cmpxchg_acq(&page->count_info, x, x + 1);
- } while (unlikely(y != x));
-
- return page_get_owner(page);
-}
-
-/* count_info and ownership are checked atomically. */
-static inline int get_page(struct page_info *page,
- struct domain *domain)
-{
- struct domain *owner = page_get_owner_and_reference(page);
-
- if (likely(owner == domain))
- return 1;
-
- if (owner != NULL)
- put_page(page);
-
- /* if (!domain->is_dying) */ /* XXX: header inclusion hell */
- gdprintk(XENLOG_INFO,
- "Error pfn %lx: rd=%p, od=%p, caf=%016lx, taf=%" PRtype_info "\n",
- page_to_mfn(page), domain,
- owner, page->count_info, page->u.inuse.type_info);
- return 0;
-}
-
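/* The typical reference pattern (sketch, the "my_" name is made up) around
 * the helpers above: hold a reference for as long as the frame is examined,
 * and always drop it on the way out: */
static int my_inspect_page(struct domain *d, struct page_info *page)
{
	if (!get_page(page, d))		/* fails if d does not own the page */
		return -EINVAL;
	/* ... the page cannot be freed underneath us here ... */
	put_page(page);			/* may free the frame on last reference */
	return 0;
}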
-int is_iomem_page(unsigned long mfn);
-
-extern void put_page_type(struct page_info *page);
-extern int get_page_type(struct page_info *page, unsigned long type);
-
-static inline void put_page_and_type(struct page_info *page)
-{
- put_page_type(page);
- put_page(page);
-}
-
-
-static inline int get_page_and_type(struct page_info *page,
- struct domain *domain,
- unsigned long type)
-{
- int rc = get_page(page, domain);
-
- if ( likely(rc) && unlikely(!get_page_type(page, type)) )
- {
- put_page(page);
- rc = 0;
- }
-
- return rc;
-}
-
-#define set_machinetophys(_mfn, _pfn) do { } while(0)
-
-#ifdef MEMORY_GUARD
-void *memguard_init(void *heap_start);
-void memguard_guard_stack(void *p);
-void memguard_guard_range(void *p, unsigned long l);
-void memguard_unguard_range(void *p, unsigned long l);
-#else
-#define memguard_init(_s) (_s)
-#define memguard_guard_stack(_p) ((void)0)
-#define memguard_guard_range(_p,_l) ((void)0)
-#define memguard_unguard_range(_p,_l) ((void)0)
-#endif
-
-// prototype of misc memory stuff
-//unsigned long __get_free_pages(unsigned int mask, unsigned int order);
-//void __free_pages(struct page_info *page, unsigned int order);
-void *pgtable_quicklist_alloc(void);
-void pgtable_quicklist_free(void *pgtable_entry);
-
-// FOLLOWING FROM linux-2.6.7/include/mm.h
-
-/*
- * This struct defines a VM memory area. There is one of these
- * per VM-area/task. A VM area is any part of the process virtual memory
- * space that has a special rule for the page-fault handlers (ie a shared
- * library, the executable area etc).
- */
-struct vm_area_struct {
- struct mm_struct * vm_mm; /* The address space we belong to. */
- unsigned long vm_start; /* Our start address within vm_mm. */
- unsigned long vm_end; /* The first byte after our end address
- within vm_mm. */
-
- /* linked list of VM areas per task, sorted by address */
- struct vm_area_struct *vm_next;
-
- pgprot_t vm_page_prot; /* Access permissions of this VMA. */
- unsigned long vm_flags; /* Flags, listed below. */
-
-#ifndef XEN
- struct rb_node vm_rb;
-
-// XEN doesn't need all the backing store stuff
- /*
- * For areas with an address space and backing store,
- * linkage into the address_space->i_mmap prio tree, or
- * linkage to the list of like vmas hanging off its node, or
- * linkage of vma in the address_space->i_mmap_nonlinear list.
- */
- union {
- struct {
- struct list_head list;
- void *parent; /* aligns with prio_tree_node parent */
- struct vm_area_struct *head;
- } vm_set;
-
- struct prio_tree_node prio_tree_node;
- } shared;
-
- /*
- * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
- * list, after a COW of one of the file pages. A MAP_SHARED vma
- * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
- * or brk vma (with NULL file) can only be in an anon_vma list.
- */
- struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
- struct anon_vma *anon_vma; /* Serialized by page_table_lock */
-
- /* Function pointers to deal with this struct. */
- struct vm_operations_struct * vm_ops;
-
- /* Information about our backing store: */
- unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
- units, *not* PAGE_CACHE_SIZE */
- struct file * vm_file; /* File we map to (can be NULL). */
- void * vm_private_data; /* was vm_pte (shared mem) */
-
-#ifdef CONFIG_NUMA
- struct mempolicy *vm_policy; /* NUMA policy for the VMA */
-#endif
-#endif
-};
-/*
- * vm_flags..
- */
-#define VM_READ 0x00000001 /* currently active flags */
-#define VM_WRITE 0x00000002
-#define VM_EXEC 0x00000004
-#define VM_SHARED 0x00000008
-
-#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
-#define VM_MAYWRITE 0x00000020
-#define VM_MAYEXEC 0x00000040
-#define VM_MAYSHARE 0x00000080
-
-#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
-#define VM_GROWSUP 0x00000200
-#define VM_SHM 0x00000400 /* shared memory area, don't swap out */
-#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
-
-#define VM_EXECUTABLE 0x00001000
-#define VM_LOCKED 0x00002000
-#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
-
- /* Used by sys_madvise() */
-#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
-#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
-
-#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
-#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
-#define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */
-#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
-#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
-#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
-
-#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
-#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
-#endif
-
-#ifdef CONFIG_STACK_GROWSUP
-#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
-#else
-#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
-#endif
-
-#if 0 /* removed when rebasing to 2.6.13 */
-/*
- * The zone field is never updated after free_area_init_core()
- * sets it, so none of the operations on it need to be atomic.
- * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
- * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
- */
-#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
-#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)
-
-static inline unsigned long page_zonenum(struct page_info *page)
-{
- return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
-}
-static inline unsigned long page_to_nid(struct page_info *page)
-{
- return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
-}
-
-struct zone;
-extern struct zone *zone_table[];
-
-static inline struct zone *page_zone(struct page_info *page)
-{
- return zone_table[page->flags >> NODEZONE_SHIFT];
-}
-
-static inline void set_page_zone(struct page_info *page, unsigned long nodezone_num)
-{
- page->flags &= ~(~0UL << NODEZONE_SHIFT);
- page->flags |= nodezone_num << NODEZONE_SHIFT;
-}
-#endif
-
-#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
-extern unsigned long max_mapnr;
-#endif
-
-static inline void *lowmem_page_address(struct page_info *page)
-{
- return __va(page_to_mfn(page) << PAGE_SHIFT);
-}
-
-#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
-#define HASHED_PAGE_VIRTUAL
-#endif
-
-#if defined(WANT_PAGE_VIRTUAL)
-#define page_address(page) ((page)->virtual)
-#define set_page_address(page, address) \
- do { \
- (page)->virtual = (address); \
- } while(0)
-#define page_address_init() do { } while(0)
-#endif
-
-#if defined(HASHED_PAGE_VIRTUAL)
-void *page_address(struct page_info *page);
-void set_page_address(struct page_info *page, void *virtual);
-void page_address_init(void);
-#endif
-
-#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
-#define page_address(page) lowmem_page_address(page)
-#define set_page_address(page, address) do { } while(0)
-#define page_address_init() do { } while(0)
-#endif
-
-
-#ifndef CONFIG_DEBUG_PAGEALLOC
-static inline void
-kernel_map_pages(struct page_info *page, int numpages, int enable)
-{
-}
-#endif
-
-extern unsigned long num_physpages;
-extern unsigned long totalram_pages;
-extern int nr_swap_pages;
-
-extern void alloc_dom_xen_and_dom_io(void);
-extern int mm_teardown(struct domain* d);
-extern void mm_final_teardown(struct domain* d);
-extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
-extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
-extern int __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);
-extern void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
-extern void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
-extern int deassign_domain_mmio_page(struct domain *d, unsigned long mpaddr,
- unsigned long phys_addr, unsigned long size);
-struct p2m_entry;
-extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr, struct p2m_entry* entry);
-extern void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr);
-extern volatile pte_t *lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr);
-extern unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr, unsigned long phys_addr, unsigned long size, unsigned long flags);
-extern unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size, unsigned long flags);
-int domain_page_mapped(struct domain *d, unsigned long mpaddr);
-int efi_mmio(unsigned long physaddr, unsigned long size);
-extern unsigned long ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
-extern unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3);
-extern unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order);
-extern unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned long flags, domid_t domid);
-extern unsigned long dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn, unsigned long gmfn, unsigned long flags, domid_t domid);
-#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
-extern void expose_p2m_init(void);
-extern unsigned long dom0vp_expose_p2m(struct domain* d, unsigned long conv_start_gpfn, unsigned long assign_start_gpfn, unsigned long expose_size, unsigned long granule_pfn);
-extern void foreign_p2m_init(struct domain* d);
-extern void foreign_p2m_destroy(struct domain* d);
-extern unsigned long dom0vp_expose_foreign_p2m(struct domain* dest_dom, unsigned long dest_gpfn, domid_t domid, XEN_GUEST_HANDLE(char) buffer, unsigned long flags);
-extern unsigned long dom0vp_unexpose_foreign_p2m(struct domain* dest_dom, unsigned long dest_gpfn, domid_t domid);
-extern unsigned long dom0vp_get_memmap(domid_t domid, XEN_GUEST_HANDLE(char) buffer);
-#else
-#define expose_p2m_init() do { } while (0)
-#define dom0vp_expose_p2m(d, conv_start_gpfn, assign_start_gpfn, expose_size, granule_pfn) (-ENOSYS)
-#define foreign_p2m_init(d) do { } while (0)
-#define foreign_p2m_destroy(d) do { } while (0)
-#define dom0vp_expose_foreign_p2m(dest_dom, dest_gpfn, domid, buffer, flags) (-ENOSYS)
-#define dom0vp_unexpose_foreign_p2m(dest_dom, dest_gpfn, domid) (-ENOSYS)
-#define __dom0vp_add_memdesc(d, memmap_info, memdesc) (-ENOSYS)
-#define dom0vp_get_memmap(domid, buffer) (-ENOSYS)
-#endif
-
-int
-p2m_pod_decrease_reservation(struct domain *d,
- xen_pfn_t gpfn, unsigned int order);
-int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
- unsigned int order);
-
-extern volatile unsigned long *mpt_table;
-extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
-extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__,
- u64* itir, struct p2m_entry* entry);
-#define machine_to_phys_mapping mpt_table
-
-#define INVALID_GFN (~0UL)
-#define INVALID_M2P_ENTRY (~0UL)
-#define VALID_M2P(_e) (!((_e) & (1UL<<63)))
-#define SHARED_M2P(_e) 0
-
-#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
-#define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)])
-
-/* If the pmt table is provided by the control panel later, we need __get_user
-* here. However, if it's allocated by the HV, we should access it directly.
-*/
-
-#define mfn_to_gmfn(_d, mfn) \
- get_gpfn_from_mfn(mfn)
-
-#define gmfn_to_mfn(_d, gpfn) \
- gmfn_to_mfn_foreign((_d), (gpfn))
-
-#define get_gfn_untyped(d, gpfn) gmfn_to_mfn(d, gpfn)
-static inline void put_gfn(struct domain *d, unsigned long gfn) {}
-static inline void mem_event_cleanup(struct domain *d) {}
-
-#define __gpfn_invalid(_d, gpfn) \
- (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL) == INVALID_MFN)
-
-#define __gmfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)
-
-#define __gpa_to_mpa(_d, gpa) \
- ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
-
-#define __mpa_to_gpa(madr) \
- ((get_gpfn_from_mfn((madr) >> PAGE_SHIFT) << PAGE_SHIFT) | \
- ((madr) & ~PAGE_MASK))
-
-/* Internal use only: returns 0 in case of bad address. */
-extern unsigned long paddr_to_maddr(unsigned long paddr);
-
-/* Arch-specific portion of memory_op hypercall. */
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
-
-int steal_page(
- struct domain *d, struct page_info *page, unsigned int memflags);
-int donate_page(
- struct domain *d, struct page_info *page, unsigned int memflags);
-
-#define domain_clamp_alloc_bitsize(d, b) (b)
-
-unsigned long domain_get_maximum_gpfn(struct domain *d);
-
-extern struct domain *dom_xen, *dom_io, *dom_cow; /* for vmcoreinfo */
-
-#endif /* __ASM_IA64_MM_H__ */
diff --git a/xen/include/asm-ia64/mmu_context.h b/xen/include/asm-ia64/mmu_context.h
deleted file mode 100644
index 5bee664448..0000000000
--- a/xen/include/asm-ia64/mmu_context.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __ASM_MMU_CONTEXT_H
-#define __ASM_MMU_CONTEXT_H
-//dummy file to resolve non-arch-indep include
-#ifdef XEN
-#define IA64_REGION_ID_KERNEL 0
-#define XEN_IA64_REGION_ID_EFI 1
-#define ia64_rid(ctx,addr) (((ctx) << 3) | (addr >> 61))
-
-#ifndef __ASSEMBLY__
-struct ia64_ctx {
- spinlock_t lock;
- unsigned int next; /* next context number to use */
- unsigned int limit; /* next >= limit => must call wrap_mmu_context() */
- unsigned int max_ctx; /* max. context value supported by all CPUs */
-};
-
-extern struct ia64_ctx ia64_ctx;
-#endif /* ! __ASSEMBLY__ */
-#endif
-#endif /* ! __ASM_MMU_CONTEXT_H */
diff --git a/xen/include/asm-ia64/msi.h b/xen/include/asm-ia64/msi.h
deleted file mode 100644
index 9864cb3fa5..0000000000
--- a/xen/include/asm-ia64/msi.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __ASM_MSI_H
-#define __ASM_MSI_H
-
-/*
- * MSI Defined Data Structures
- */
-#define MSI_ADDRESS_HEADER 0xfee
-#define MSI_ADDRESS_HEADER_SHIFT 12
-#define MSI_ADDRESS_HEADER_MASK 0xfff000
-#define MSI_ADDRESS_DEST_ID_MASK 0xfff0000f
-#define MSI_TARGET_CPU_MASK 0xff
-#define MSI_TARGET_CPU_SHIFT 4
-#define MSI_DELIVERY_MODE 0
-#define MSI_LEVEL_MODE 1 /* Edge always assert */
-#define MSI_TRIGGER_MODE 0 /* MSI is edge sensitive */
-#define MSI_PHYSICAL_MODE 0
-#define MSI_LOGICAL_MODE 1
-#define MSI_REDIRECTION_HINT_MODE 0
-
-#define MSI_DATA_VECTOR_SHIFT 0
-#define MSI_DATA_VECTOR_MASK 0x000000ff
-#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK)
-
-struct msi_msg {
- u32 address_lo; /* low 32 bits of msi message address */
- u32 address_hi; /* high 32 bits of msi message address */
- u32 data; /* 16 bits of msi message data */
-};
-
-#endif /* __ASM_MSI_H */
diff --git a/xen/include/asm-ia64/multicall.h b/xen/include/asm-ia64/multicall.h
deleted file mode 100644
index 5002382359..0000000000
--- a/xen/include/asm-ia64/multicall.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __ASM_IA64_MULTICALL_H__
-#define __ASM_IA64_MULTICALL_H__
-
-#include <public/xen.h>
-#include <xen/errno.h>
-
-extern unsigned long ia64_do_multicall_call(
- unsigned long arg0,
- unsigned long arg1,
- unsigned long arg2,
- unsigned long arg3,
- unsigned long arg4,
- unsigned long arg5,
- unsigned long op);
-
-static inline void do_multicall_call(multicall_entry_t *call)
-{
- if (call->op < NR_hypercalls)
- call->result = ia64_do_multicall_call(
- call->args[0],
- call->args[1],
- call->args[2],
- call->args[3],
- call->args[4],
- call->args[5],
- call->op);
- else
- call->result = -ENOSYS;
-}
-
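/* An illustrative driver loop (sketch, the "my_" name is made up) over
 * do_multicall_call(); in Xen proper the common do_multicall() hypercall
 * handler plays this role: */
static void my_run_multicalls(multicall_entry_t *calls, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++)
		do_multicall_call(&calls[i]);	/* result lands in calls[i].result */
}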
-#endif /* __ASM_IA64_MULTICALL_H__ */
diff --git a/xen/include/asm-ia64/offsets.h b/xen/include/asm-ia64/offsets.h
deleted file mode 100644
index dfc66d76a4..0000000000
--- a/xen/include/asm-ia64/offsets.h
+++ /dev/null
@@ -1,9 +0,0 @@
-//dummy file to resolve non-arch-indep include
-#ifndef __IA64_OFFSETS_H
-#define __IA64_OFFSETS_H
-
-#ifndef GENERATE_ASM_OFFSETS
-#include <asm/asm-offsets.h>
-#endif
-
-#endif /* __IA64_OFFSETS_H */
diff --git a/xen/include/asm-ia64/p2m_entry.h b/xen/include/asm-ia64/p2m_entry.h
deleted file mode 100644
index 268c7826f1..0000000000
--- a/xen/include/asm-ia64/p2m_entry.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/******************************************************************************
- * p2m_entry.h
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_P2M_ENTRY_H__
-#define __ASM_P2M_ENTRY_H__
-
-#include <asm/pgtable.h>
-
-struct p2m_entry {
-#define P2M_PTE_ALWAYS_RETRY ((volatile pte_t*) -1)
- volatile pte_t* ptep;
- pte_t used;
-};
-
-static inline void
-p2m_entry_set(struct p2m_entry* entry, volatile pte_t* ptep, pte_t used)
-{
- entry->ptep = ptep;
- entry->used = used;
-}
-
-static inline void
-p2m_entry_set_retry(struct p2m_entry* entry)
-{
- entry->ptep = P2M_PTE_ALWAYS_RETRY;
-}
-
-static inline int
-p2m_entry_retry(struct p2m_entry* entry)
-{
- /* XXX see lookup_domain_pte().
- NULL is set for an invalid gpaddr for the time being. */
- if (entry->ptep == NULL)
- return 0;
-
- if (entry->ptep == P2M_PTE_ALWAYS_RETRY)
- return 1;
-
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
- return ((pte_val(*entry->ptep) & ~_PAGE_TLB_TRACK_MASK) !=
- (pte_val(entry->used) & ~_PAGE_TLB_TRACK_MASK));
-#else
- return (pte_val(*entry->ptep) != pte_val(entry->used));
-#endif
-}
-
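/* An illustrative retry loop (sketch, the "my_" name is made up) over the
 * helpers above, assuming lookup_domain_mpa() from asm/mm.h fills the entry:
 * repeat the lookup until the cached pte is still current when used: */
static unsigned long my_stable_lookup(struct domain *d, unsigned long mpaddr)
{
	struct p2m_entry entry;
	unsigned long mfn;

	do {
		mfn = lookup_domain_mpa(d, mpaddr, &entry);
		/* ... consume mfn ... */
	} while (p2m_entry_retry(&entry));

	return mfn;
}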
-#define p2m_get_hostp2m(d) (d)
-
-#endif // __ASM_P2M_ENTRY_H__
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-ia64/perfc.h b/xen/include/asm-ia64/perfc.h
deleted file mode 100644
index 40cd752098..0000000000
--- a/xen/include/asm-ia64/perfc.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef __ASM_PERFC_H__
-#define __ASM_PERFC_H__
-
-#include <asm/vhpt.h>
-#include <asm/privop_stat.h>
-
-static inline void arch_perfc_reset(void)
-{
- reset_privop_addrs();
-}
-
-static inline void arch_perfc_gather(void)
-{
- gather_vhpt_stats();
- gather_privop_addrs();
-}
-
-#endif
diff --git a/xen/include/asm-ia64/perfc_defn.h b/xen/include/asm-ia64/perfc_defn.h
deleted file mode 100644
index 42e541ace2..0000000000
--- a/xen/include/asm-ia64/perfc_defn.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/* This file is legitimately included multiple times. */
-
-PERFCOUNTER(dtlb_translate, "dtlb hit")
-
-PERFCOUNTER(tr_translate, "TR hit")
-
-PERFCOUNTER(vhpt_translate, "virtual vhpt translation")
-PERFCOUNTER(fast_vhpt_translate, "virtual vhpt fast translation")
-
-PERFCOUNTER(recover_to_page_fault, "recoveries to page fault")
-PERFCOUNTER(recover_to_break_fault, "recoveries to break fault")
-
-PERFCOUNTER(phys_translate, "metaphysical translation")
-
-PERFCOUNTER(idle_when_pending, "vcpu idle at event")
-
-PERFCOUNTER(pal_halt_light, "calls to pal_halt_light")
-
-PERFCOUNTER(lazy_cover, "lazy cover")
-
-PERFCOUNTER(mov_to_ar_imm, "privop mov_to_ar_imm")
-PERFCOUNTER(mov_to_ar_reg, "privop mov_to_ar_reg")
-PERFCOUNTER(mov_from_ar, "privop privified-mov_from_ar")
-PERFCOUNTER(ssm, "privop ssm")
-PERFCOUNTER(rsm, "privop rsm")
-PERFCOUNTER(rfi, "privop rfi")
-PERFCOUNTER(bsw0, "privop bsw0")
-PERFCOUNTER(bsw1, "privop bsw1")
-PERFCOUNTER(cover, "privop cover")
-PERFCOUNTER(fc, "privop privified-fc")
-PERFCOUNTER(cpuid, "privop privified-cpuid")
-
-PERFCOUNTER_ARRAY(mov_to_cr, "privop mov to cr", 128)
-PERFCOUNTER_ARRAY(mov_from_cr, "privop mov from cr", 128)
-
-PERFCOUNTER_ARRAY(misc_privop, "privop misc", 64)
-
-// privileged instructions to fall into vmx_entry
-PERFCOUNTER(vmx_rsm, "vmx privop rsm")
-PERFCOUNTER(vmx_ssm, "vmx privop ssm")
-PERFCOUNTER(vmx_mov_to_psr, "vmx privop mov_to_psr")
-PERFCOUNTER(vmx_mov_from_psr, "vmx privop mov_from_psr")
-PERFCOUNTER(vmx_mov_from_cr, "vmx privop mov_from_cr")
-PERFCOUNTER(vmx_mov_to_cr, "vmx privop mov_to_cr")
-PERFCOUNTER(vmx_bsw0, "vmx privop bsw0")
-PERFCOUNTER(vmx_bsw1, "vmx privop bsw1")
-PERFCOUNTER(vmx_cover, "vmx privop cover")
-PERFCOUNTER(vmx_rfi, "vmx privop rfi")
-PERFCOUNTER(vmx_itr_d, "vmx privop itr_d")
-PERFCOUNTER(vmx_itr_i, "vmx privop itr_i")
-PERFCOUNTER(vmx_ptr_d, "vmx privop ptr_d")
-PERFCOUNTER(vmx_ptr_i, "vmx privop ptr_i")
-PERFCOUNTER(vmx_itc_d, "vmx privop itc_d")
-PERFCOUNTER(vmx_itc_i, "vmx privop itc_i")
-PERFCOUNTER(vmx_ptc_l, "vmx privop ptc_l")
-PERFCOUNTER(vmx_ptc_g, "vmx privop ptc_g")
-PERFCOUNTER(vmx_ptc_ga, "vmx privop ptc_ga")
-PERFCOUNTER(vmx_ptc_e, "vmx privop ptc_e")
-PERFCOUNTER(vmx_mov_to_rr, "vmx privop mov_to_rr")
-PERFCOUNTER(vmx_mov_from_rr, "vmx privop mov_from_rr")
-PERFCOUNTER(vmx_thash, "vmx privop thash")
-PERFCOUNTER(vmx_ttag, "vmx privop ttag")
-PERFCOUNTER(vmx_tpa, "vmx privop tpa")
-PERFCOUNTER(vmx_tak, "vmx privop tak")
-PERFCOUNTER(vmx_mov_to_ar_imm, "vmx privop mov_to_ar_imm")
-PERFCOUNTER(vmx_mov_to_ar_reg, "vmx privop mov_to_ar_reg")
-PERFCOUNTER(vmx_mov_from_ar_reg, "vmx privop mov_from_ar_reg")
-PERFCOUNTER(vmx_mov_to_dbr, "vmx privop mov_to_dbr")
-PERFCOUNTER(vmx_mov_to_ibr, "vmx privop mov_to_ibr")
-PERFCOUNTER(vmx_mov_to_pmc, "vmx privop mov_to_pmc")
-PERFCOUNTER(vmx_mov_to_pmd, "vmx privop mov_to_pmd")
-PERFCOUNTER(vmx_mov_to_pkr, "vmx privop mov_to_pkr")
-PERFCOUNTER(vmx_mov_from_dbr, "vmx privop mov_from_dbr")
-PERFCOUNTER(vmx_mov_from_ibr, "vmx privop mov_from_ibr")
-PERFCOUNTER(vmx_mov_from_pmc, "vmx privop mov_from_pmc")
-PERFCOUNTER(vmx_mov_from_pkr, "vmx privop mov_from_pkr")
-PERFCOUNTER(vmx_mov_from_cpuid, "vmx privop mov_from_cpuid")
-
-
-PERFCOUNTER_ARRAY(slow_hyperprivop, "slow hyperprivops", HYPERPRIVOP_MAX + 1)
-PERFCOUNTER_ARRAY(fast_hyperprivop, "fast hyperprivops", HYPERPRIVOP_MAX + 1)
-
-PERFCOUNTER_ARRAY(slow_reflect, "slow reflection", 0x80)
-PERFCOUNTER_ARRAY(fast_reflect, "fast reflection", 0x80)
-
-PERFSTATUS(vhpt_nbr_entries, "nbr of entries per VHPT")
-PERFSTATUS(vhpt_valid_entries, "nbr of valid entries in VHPT")
-
-PERFCOUNTER_ARRAY(vmx_mmio_access, "vmx_mmio_access", 8)
-PERFCOUNTER(vmx_pal_emul, "vmx_pal_emul")
-PERFCOUNTER_ARRAY(vmx_switch_mm_mode, "vmx_switch_mm_mode", 8)
-PERFCOUNTER(vmx_ia64_handle_break,"vmx_ia64_handle_break")
-PERFCOUNTER_ARRAY(vmx_inject_guest_interruption,
- "vmx_inject_guest_interruption", 0x80)
-PERFCOUNTER_ARRAY(fw_hypercall, "fw_hypercall", 0x20)
-
-#ifdef CONFIG_PRIVOP_ADDRS
-#ifndef PERFPRIVOPADDR
-#define PERFPRIVOPADDR(name) \
-PERFSTATUS_ARRAY(privop_addr_##name##_addr, "privop-addrs addr " #name, \
- PRIVOP_COUNT_NADDRS) \
-PERFSTATUS_ARRAY(privop_addr_##name##_count, "privop-addrs count " #name, \
- PRIVOP_COUNT_NADDRS) \
-PERFSTATUS(privop_addr_##name##_overflow, "privop-addrs overflow " #name)
-#endif
-
-PERFPRIVOPADDR(get_ifa)
-PERFPRIVOPADDR(thash)
-
-#undef PERFPRIVOPADDR
-#endif
-
-// vhpt.c
-PERFCOUNTER(local_vhpt_flush, "local_vhpt_flush")
-PERFCOUNTER(vcpu_vhpt_flush, "vcpu_vhpt_flush")
-PERFCOUNTER(vcpu_flush_vtlb_all, "vcpu_flush_vtlb_all")
-PERFCOUNTER(domain_flush_vtlb_all, "domain_flush_vtlb_all")
-PERFCOUNTER(vcpu_flush_tlb_vhpt_range, "vcpu_flush_tlb_vhpt_range")
-PERFCOUNTER(domain_flush_vtlb_track_entry, "domain_flush_vtlb_track_entry")
-PERFCOUNTER(domain_flush_vtlb_local, "domain_flush_vtlb_local")
-PERFCOUNTER(domain_flush_vtlb_global, "domain_flush_vtlb_global")
-PERFCOUNTER(domain_flush_vtlb_range, "domain_flush_vtlb_range")
-
-// domain.c
-PERFCOUNTER(flush_vtlb_for_context_switch, "flush_vtlb_for_context_switch")
-
-// mm.c
-PERFCOUNTER(assign_domain_page_replace, "assign_domain_page_replace")
-PERFCOUNTER(assign_domain_pge_cmpxchg_rel, "assign_domain_pge_cmpxchg_rel")
-PERFCOUNTER(zap_domain_page_one, "zap_domain_page_one")
-PERFCOUNTER(dom0vp_zap_physmap, "dom0vp_zap_physmap")
-PERFCOUNTER(dom0vp_add_physmap, "dom0vp_add_physmap")
-PERFCOUNTER(create_grant_host_mapping, "create_grant_host_mapping")
-PERFCOUNTER(replace_grant_host_mapping, "replace_grant_host_mapping")
-PERFCOUNTER(steal_page_refcount, "steal_page_refcount")
-PERFCOUNTER(steal_page, "steal_page")
-PERFCOUNTER(guest_physmap_add_page, "guest_physmap_add_page")
-PERFCOUNTER(guest_physmap_remove_page, "guest_physmap_remove_page")
-PERFCOUNTER(domain_page_flush_and_put, "domain_page_flush_and_put")
-
-// dom0vp
-PERFCOUNTER(dom0vp_phystomach, "dom0vp_phystomach")
-PERFCOUNTER(dom0vp_machtophys, "dom0vp_machtophys")
-
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
-// insert or dirty
-PERFCOUNTER(tlb_track_iod, "tlb_track_iod")
-PERFCOUNTER(tlb_track_iod_again, "tlb_track_iod_again")
-PERFCOUNTER(tlb_track_iod_not_tracked, "tlb_track_iod_not_tracked")
-PERFCOUNTER(tlb_track_iod_force_many, "tlb_track_iod_force_many")
-PERFCOUNTER(tlb_track_iod_tracked_many, "tlb_track_iod_tracked_many")
-PERFCOUNTER(tlb_track_iod_tracked_many_del, "tlb_track_iod_tracked_many_del")
-PERFCOUNTER(tlb_track_iod_found, "tlb_track_iod_found")
-PERFCOUNTER(tlb_track_iod_new_entry, "tlb_track_iod_new_entry")
-PERFCOUNTER(tlb_track_iod_new_failed, "tlb_track_iod_new_failed")
-PERFCOUNTER(tlb_track_iod_new_many, "tlb_track_iod_new_many")
-PERFCOUNTER(tlb_track_iod_insert, "tlb_track_iod_insert")
-PERFCOUNTER(tlb_track_iod_dirtied, "tlb_track_iod_dirtied")
-
-// search and remove
-PERFCOUNTER(tlb_track_sar, "tlb_track_sar")
-PERFCOUNTER(tlb_track_sar_not_tracked, "tlb_track_sar_not_tracked")
-PERFCOUNTER(tlb_track_sar_not_found, "tlb_track_sar_not_found")
-PERFCOUNTER(tlb_track_sar_found, "tlb_track_sar_found")
-PERFCOUNTER(tlb_track_sar_many, "tlb_track_sar_many")
-
-// flush
-PERFCOUNTER(tlb_track_use_rr7, "tlb_track_use_rr7")
-PERFCOUNTER(tlb_track_swap_rr0, "tlb_track_swap_rr0")
-#endif
-
-// tlb flush clock
-#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
-PERFCOUNTER(tlbflush_clock_cswitch_purge, "tlbflush_clock_cswitch_purge")
-PERFCOUNTER(tlbflush_clock_cswitch_skip, "tlbflush_clock_cswitch_skip")
-#endif
diff --git a/xen/include/asm-ia64/privop.h b/xen/include/asm-ia64/privop.h
deleted file mode 100644
index 64e73f473c..0000000000
--- a/xen/include/asm-ia64/privop.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef _XEN_IA64_PRIVOP_H
-#define _XEN_IA64_PRIVOP_H
-
-#include <asm/ia64_int.h>
-#include <asm/vcpu.h>
-
-extern IA64FAULT priv_emulate(VCPU *vcpu, REGS *regs, u64 isr);
-
-extern void privify_memory(void *start, u64 len);
-
-extern int ia64_hyperprivop(unsigned long iim, REGS *regs);
-
-#endif
diff --git a/xen/include/asm-ia64/privop_stat.h b/xen/include/asm-ia64/privop_stat.h
deleted file mode 100644
index 326b56d278..0000000000
--- a/xen/include/asm-ia64/privop_stat.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _XEN_IA64_PRIVOP_STAT_H
-#define _XEN_IA64_PRIVOP_STAT_H
-
-#include <xen/types.h>
-#include <public/xen.h>
-
-#ifdef CONFIG_PRIVOP_ADDRS
-
-extern void gather_privop_addrs(void);
-extern void reset_privop_addrs(void);
-
-#define PERFCOUNTER(var, name)
-#define PERFCOUNTER_ARRAY(var, name, size)
-
-#define PERFSTATUS(var, name)
-#define PERFSTATUS_ARRAY(var, name, size)
-
-#define PERFPRIVOPADDR(name) privop_inst_##name,
-
-enum privop_inst {
-#include <asm/perfc_defn.h>
-};
-
-#undef PERFCOUNTER
-#undef PERFCOUNTER_ARRAY
-
-#undef PERFSTATUS
-#undef PERFSTATUS_ARRAY
-
-#undef PERFPRIVOPADDR
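
The construct above is the classic X-macro pattern: perfc_defn.h is expanded a second time with PERFPRIVOPADDR redefined to emit enum constants, while every other PERF* macro expands to nothing. A minimal self-contained sketch of the same idea (the names ITEM/items are hypothetical, not from the Xen tree):

    #include <stdio.h>

    #define ITEM_LIST ITEM(foo) ITEM(bar) ITEM(baz)  /* stands in for perfc_defn.h */

    #define ITEM(name) item_##name,
    enum items { ITEM_LIST item_max };               /* item_foo, item_bar, item_baz */
    #undef ITEM

    #define ITEM(name) #name,
    static const char *item_names[] = { ITEM_LIST }; /* "foo", "bar", "baz" */
    #undef ITEM

    int main(void)
    {
        for (int i = 0; i < item_max; i++)
            printf("%d: %s\n", i, item_names[i]);
        return 0;
    }
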
-
-#define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
-extern void privop_count_addr(unsigned long addr, enum privop_inst inst);
-
-#else
-#define PRIVOP_COUNT_ADDR(x,y) do {} while (0)
-#define gather_privop_addrs() do {} while (0)
-#define reset_privop_addrs() do {} while (0)
-#endif
-
-#endif /* _XEN_IA64_PRIVOP_STAT_H */
diff --git a/xen/include/asm-ia64/regionreg.h b/xen/include/asm-ia64/regionreg.h
deleted file mode 100644
index efd1732622..0000000000
--- a/xen/include/asm-ia64/regionreg.h
+++ /dev/null
@@ -1,111 +0,0 @@
-
-#ifndef _REGIONREG_H_
-#define _REGIONREG_H_
-
-#define XEN_DEFAULT_RID 7
-#define IA64_MIN_IMPL_RID_MSB 17
-#define _REGION_ID(x) ({ia64_rr _v; _v.rrval = (long)(x); _v.rid;})
-#define _REGION_PAGE_SIZE(x) ({ia64_rr _v; _v.rrval = (long)(x); _v.ps;})
-#define _REGION_HW_WALKER(x) ({ia64_rr _v; _v.rrval = (long)(x); _v.ve;})
-#define _MAKE_RR(r, sz, v) ({ia64_rr _v; _v.rrval=0; _v.rid=(r); \
- _v.ps=(sz); _v.ve=(v); _v.rrval;})
-
-typedef union ia64_rr {
- struct {
- unsigned long ve : 1; /* enable hw walker */
- unsigned long reserved0 : 1; /* reserved */
- unsigned long ps : 6; /* log page size */
- unsigned long rid : 24; /* region id */
- unsigned long reserved1 : 32; /* reserved */
- };
- unsigned long rrval;
-} ia64_rr;
-
-//
-// region register macros
-//
-#define RR_TO_VE(arg) (((arg) >> 0) & 0x0000000000000001)
-#define RR_VE(arg) (((arg) & 0x0000000000000001) << 0)
-#define RR_VE_MASK 0x0000000000000001L
-#define RR_VE_SHIFT 0
-#define RR_TO_PS(arg) (((arg) >> 2) & 0x000000000000003f)
-#define RR_PS(arg) (((arg) & 0x000000000000003f) << 2)
-#define RR_PS_MASK 0x00000000000000fcL
-#define RR_PS_SHIFT 2
-#define RR_TO_RID(arg) (((arg) >> 8) & 0x0000000000ffffff)
-#define RR_RID(arg) (((arg) & 0x0000000000ffffff) << 8)
-#define RR_RID_MASK 0x00000000ffffff00L
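
As a quick illustration of the field layout above — a standalone sketch assuming only the RR_* macros are in scope, with made-up values (rid 0x42, 16KB pages, hardware walker on):

    #include <assert.h>

    int main(void)
    {
        unsigned long rr = RR_RID(0x42) | RR_PS(14) | RR_VE(1);  /* == 0x4239 */

        assert(RR_TO_RID(rr) == 0x42);
        assert(RR_TO_PS(rr) == 14);    /* log2 page size: 16KB */
        assert(RR_TO_VE(rr) == 1);     /* hw walker enabled */
        return 0;
    }
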
-
-DECLARE_PER_CPU(unsigned long, domain_shared_info);
-DECLARE_PER_CPU(unsigned long, inserted_vhpt);
-DECLARE_PER_CPU(unsigned long, inserted_shared_info);
-DECLARE_PER_CPU(unsigned long, inserted_mapped_regs);
-DECLARE_PER_CPU(unsigned long, inserted_vpd);
-
-extern cpumask_t percpu_set;
-
-int set_one_rr(unsigned long rr, unsigned long val);
-int set_one_rr_efi(unsigned long rr, unsigned long val);
-void set_one_rr_efi_restore(unsigned long rr, unsigned long val);
-
-// This function exists purely for performance: scrambling bits in the
-// region id apparently makes for better hashing, hence better use of the
-// VHPT and better performance.
-// Note that a RID should be mangled only while it is stored in a region
-// register; anywhere it is "viewable" outside of this module, it should
-// be unmangled.
-
-// NOTE: this function is also implemented in assembly code in hyper_set_rr!!
-// Must ensure these two remain consistent!
-static inline unsigned long
-vmMangleRID(unsigned long RIDVal)
-{
- union bits64 {
- unsigned char bytes[4];
- unsigned long uint;
- };
- union bits64 t;
- unsigned char tmp;
-
- t.uint = RIDVal;
- tmp = t.bytes[1];
- t.bytes[1] = t.bytes[3];
- t.bytes[3] = tmp;
-
- return t.uint;
-}
-
-// since vmMangleRID is symmetric, use it for unmangling also
-#define vmUnmangleRID(x) vmMangleRID(x)
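
The symmetry claim holds because the function is a pure swap of bytes 1 and 3, which is its own inverse. A hypothetical standalone check of the round trip (same swap as vmMangleRID; assumes a little-endian build, as Xen/ia64 uses):

    #include <assert.h>

    static unsigned long mangle(unsigned long rid)
    {
        union { unsigned char bytes[4]; unsigned long uint; } t;
        unsigned char tmp;

        t.uint = rid;
        tmp = t.bytes[1];
        t.bytes[1] = t.bytes[3];
        t.bytes[3] = tmp;
        return t.uint;
    }

    int main(void)
    {
        unsigned long rid = 0x123456UL;          /* a 24-bit region id */

        assert(mangle(rid) != rid);              /* scrambled... */
        assert(mangle(mangle(rid)) == rid);      /* ...but an involution */
        return 0;
    }
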
-
-extern void init_rid_allocator (void);
-
-struct domain;
-
-/* Allocate a RID range and metaphysical RIDs for domain d.
-   If ridbits is 0, a default value is used instead. */
-extern int allocate_rid_range(struct domain *d, unsigned long ridbits);
-extern int deallocate_rid_range(struct domain *d);
-
-struct vcpu;
-extern void init_all_rr(struct vcpu *v);
-
-extern void set_virtual_rr0(void);
-extern void set_metaphysical_rr0(void);
-
-extern void load_region_regs(struct vcpu *v);
-
-extern int is_reserved_rr_rid(struct vcpu *vcpu, u64 reg_value);
-extern int is_reserved_rr_field(struct vcpu *vcpu, u64 reg_value);
-
-#endif /* !_REGIONREG_H_ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-ia64/regs.h b/xen/include/asm-ia64/regs.h
deleted file mode 100644
index 9cbd09ec72..0000000000
--- a/xen/include/asm-ia64/regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm/ptrace.h>
diff --git a/xen/include/asm-ia64/shadow.h b/xen/include/asm-ia64/shadow.h
deleted file mode 100644
index b04fb3c028..0000000000
--- a/xen/include/asm-ia64/shadow.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/******************************************************************************
- * include/asm-ia64/shadow.h
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef _XEN_SHADOW_H
-#define _XEN_SHADOW_H
-
-#include <xen/config.h>
-
-#ifndef CONFIG_SHADOW
-# error "CONFIG_SHADOW must be defined"
-#endif
-
-#define shadow_drop_references(d, p) ((void)0)
-
-// this is used only by x86-specific code
-//#define shadow_sync_and_drop_references(d, p) ((void)0)
-
-#define shadow_mode_translate(d) (1)
-
-/*
- * Utilities to change the gpfn->mfn relationship for a designated domain,
- * as required by gnttab transfer, ballooning, the device model, etc.
- */
-int guest_physmap_add_page(struct domain *d, unsigned long gpfn,
- unsigned long mfn, unsigned int page_order);
-void guest_physmap_remove_page(struct domain *d, unsigned long gpfn,
- unsigned long mfn, unsigned int page_order);
-
-static inline int
-shadow_mode_enabled(struct domain *d)
-{
- return d->arch.shadow_bitmap != NULL;
-}
-
-static inline int
-shadow_mark_page_dirty(struct domain *d, unsigned long gpfn)
-{
- if (gpfn < d->arch.shadow_bitmap_size * 8
- && !test_and_set_bit(gpfn, d->arch.shadow_bitmap)) {
- /* The page was not dirty. */
- atomic64_inc(&d->arch.shadow_dirty_count);
- return 1;
- } else
- return 0;
-}
-
-#endif // _XEN_SHADOW_H
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
-
diff --git a/xen/include/asm-ia64/shared.h b/xen/include/asm-ia64/shared.h
deleted file mode 100644
index 4f1ebb2a5d..0000000000
--- a/xen/include/asm-ia64/shared.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __XEN_ASM_SHARED_H__
-#define __XEN_ASM_SHARED_H__
-
-#endif /* __XEN_ASM_SHARED_H__ */
diff --git a/xen/include/asm-ia64/sioemu.h b/xen/include/asm-ia64/sioemu.h
deleted file mode 100644
index 38ed407e90..0000000000
--- a/xen/include/asm-ia64/sioemu.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/******************************************************************************
- * sioemu.h
- *
- * Copyright (c) 2008 Tristan Gingold <tgingold@free.fr>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_SIOEMU_H_
-#define __ASM_SIOEMU_H_
-extern void sioemu_hypercall (struct pt_regs *regs);
-extern void sioemu_deliver_event (void);
-extern void sioemu_io_emulate (unsigned long padr, unsigned long data,
- unsigned long data1, unsigned long word);
-extern void sioemu_sal_assist (struct vcpu *v);
-#endif /* __ASM_SIOEMU_H_ */
diff --git a/xen/include/asm-ia64/slab.h b/xen/include/asm-ia64/slab.h
deleted file mode 100644
index a3239a4cbc..0000000000
--- a/xen/include/asm-ia64/slab.h
+++ /dev/null
@@ -1,3 +0,0 @@
-#include <xen/xmalloc.h>
-#include <linux/gfp.h>
-#include <asm/delay.h>
diff --git a/xen/include/asm-ia64/softirq.h b/xen/include/asm-ia64/softirq.h
deleted file mode 100644
index 9e5a03c482..0000000000
--- a/xen/include/asm-ia64/softirq.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_SOFTIRQ_H__
-#define __ASM_SOFTIRQ_H__
-
-#define CMC_DISABLE_SOFTIRQ (NR_COMMON_SOFTIRQS + 0)
-#define CMC_ENABLE_SOFTIRQ (NR_COMMON_SOFTIRQS + 1)
-
-#define NR_ARCH_SOFTIRQS 2
-
-#endif /* __ASM_SOFTIRQ_H__ */
diff --git a/xen/include/asm-ia64/time.h b/xen/include/asm-ia64/time.h
deleted file mode 100644
index 2361901ea6..0000000000
--- a/xen/include/asm-ia64/time.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_TIME_H_
-#define _ASM_TIME_H_
-
-#include <asm/linux/time.h>
-#include <asm/timex.h>
-
-struct tm;
-struct tm wallclock_time(void);
-
-void get_wallclock(uint64_t *sec, uint64_t *nsec, uint64_t *now);
-
-#endif /* _ASM_TIME_H_ */
diff --git a/xen/include/asm-ia64/tlb.h b/xen/include/asm-ia64/tlb.h
deleted file mode 100644
index 3ccfaff397..0000000000
--- a/xen/include/asm-ia64/tlb.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef XEN_ASM_IA64_TLB_H
-#define XEN_ASM_IA64_TLB_H
-
-#define NITRS 12
-#define NDTRS 12
-
-union pte_flags {
- struct {
- unsigned long p : 1; // 0
- unsigned long : 1; // 1
- unsigned long ma : 3; // 2-4
- unsigned long a : 1; // 5
- unsigned long d : 1; // 6
- unsigned long pl : 2; // 7-8
- unsigned long ar : 3; // 9-11
- unsigned long ppn : 38; // 12-49
- unsigned long : 2; // 50-51
- unsigned long ed : 1; // 52
- };
- unsigned long val;
-};
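
A short fragment showing how the union maps fields onto PTE bit positions — hypothetical values, assuming the union pte_flags definition above is in scope:

    union pte_flags pte = { .val = 0 };

    pte.p = 1;            /* present, bit 0 */
    pte.ar = 3;           /* access rights, bits 9-11 */
    pte.ppn = 0x1234;     /* physical page number, bits 12-49 */

    assert(pte.val & 1);
    assert(((pte.val >> 9) & 0x7) == 3);
    assert(((pte.val >> 12) & ((1UL << 38) - 1)) == 0x1234);
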
-
-typedef struct {
- volatile union pte_flags pte;
- union {
- struct {
- unsigned long : 2; // 0-1
- unsigned long ps : 6; // 2-7
- unsigned long key : 24; // 8-31
- unsigned long : 32; // 32-63
- };
- unsigned long itir;
- };
-
- unsigned long vadr;
- unsigned long rid;
-} TR_ENTRY;
-
-#endif
diff --git a/xen/include/asm-ia64/tlb_track.h b/xen/include/asm-ia64/tlb_track.h
deleted file mode 100644
index 503e7ef4cd..0000000000
--- a/xen/include/asm-ia64/tlb_track.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/******************************************************************************
- * tlb_track.h
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __TLB_TRACK_H__
-#define __TLB_TRACK_H__
-
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
-
-#include <xen/sched.h>
-#include <xen/perfc.h>
-#include <asm/domain.h>
-#include <xen/list.h>
-#include <asm/p2m_entry.h>
-#include <asm/vcpumask.h>
-
-// TODO: compact this structure.
-struct tlb_track_entry {
- struct list_head list;
-
- volatile pte_t* ptep; // corresponding p2m entry
-
- /* XXX should we use TR_ENTRY? */
- pte_t pte_val; // mfn and other flags
-                         // pte_val.p = 1:
-                         //   a tlb entry has been inserted.
-                         // pte_val.p = 0:
-                         //   a tlb entry was inserted at some point (hence
-                         //   this entry exists), but a tlb purge has since
-                         //   been issued, so this virtual address no longer
-                         //   needs to be purged.
- unsigned long vaddr; // virtual address
- unsigned long rid; // rid
-
- cpumask_t pcpu_dirty_mask;
- vcpumask_t vcpu_dirty_mask;
-
-#ifdef CONFIG_TLB_TRACK_CNT
-#define TLB_TRACK_CNT_FORCE_MANY 256 /* XXX how many? */
- unsigned long cnt;
-#endif
-};
-
-struct tlb_track {
-
-/* see __gnttab_map_grant_ref():
-   a domain can map up to MAPTRACK_MAX_ENTRIES granted pages. */
-#define TLB_TRACK_LIMIT_ENTRIES \
- (MAPTRACK_MAX_ENTRIES * (PAGE_SIZE / sizeof(struct tlb_track)))
-
- spinlock_t free_list_lock;
- struct list_head free_list;
- unsigned int limit;
- unsigned int num_entries;
- unsigned int num_free;
- struct page_list_head page_list;
-
- /* XXX hash table size */
- spinlock_t hash_lock;
- unsigned int hash_size;
- unsigned int hash_shift;
- unsigned int hash_mask;
- struct list_head* hash;
-};
-
-int tlb_track_create(struct domain* d);
-void tlb_track_destroy(struct domain* d);
-
-void tlb_track_free_entry(struct tlb_track* tlb_track,
- struct tlb_track_entry* entry);
-
-void
-__vcpu_tlb_track_insert_or_dirty(struct vcpu *vcpu, unsigned long vaddr,
- struct p2m_entry* entry);
-static inline void
-vcpu_tlb_track_insert_or_dirty(struct vcpu *vcpu, unsigned long vaddr,
- struct p2m_entry* entry)
-{
-    /* Optimization: a non-tracking pte is the most common case. */
- perfc_incr(tlb_track_iod);
- if (!pte_tlb_tracking(entry->used)) {
- perfc_incr(tlb_track_iod_not_tracked);
- return;
- }
-
- __vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
-}
-
-
-/* Result of tlb_track_search_and_remove(); when an entry is found,
- * it is returned to the caller through *entryp.
- */
-enum TLB_TRACK_RET {
- TLB_TRACK_NOT_TRACKED,
- TLB_TRACK_NOT_FOUND,
- TLB_TRACK_FOUND,
- TLB_TRACK_MANY,
- TLB_TRACK_AGAIN,
-};
-typedef enum TLB_TRACK_RET TLB_TRACK_RET_T;
-
-TLB_TRACK_RET_T
-tlb_track_search_and_remove(struct tlb_track* tlb_track,
- volatile pte_t* ptep, pte_t old_pte,
- struct tlb_track_entry** entryp);
-
-void
-__tlb_track_entry_printf(const char* func, int line,
- const struct tlb_track_entry* entry);
-#define tlb_track_entry_printf(entry) \
- __tlb_track_entry_printf(__func__, __LINE__, (entry))
-#else
-// define these as no-ops
-#define tlb_track_create(d) do { } while (0)
-#define tlb_track_destroy(d) do { } while (0)
-#define tlb_track_free_entry(tlb_track, entry) do { } while (0)
-#define vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry) \
- do { } while (0)
-#define tlb_track_search_and_remove(tlb_track, ptep, old_pte, entryp) \
- do { } while (0)
-#define tlb_track_entry_printf(entry) do { } while (0)
-#endif /* CONFIG_XEN_IA64_TLB_TRACK */
-
-#endif /* __TLB_TRACK_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-ia64/tlbflush.h b/xen/include/asm-ia64/tlbflush.h
deleted file mode 100644
index 705b8717e1..0000000000
--- a/xen/include/asm-ia64/tlbflush.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef __FLUSHTLB_H__
-#define __FLUSHTLB_H__
-
-struct vcpu;
-struct domain;
-
-/* TLB flushes can be either local (current vcpu only) or domain-wide (on
-   all vcpus), and either all-flush or range-only.
-
-   vTLB flushing means flushing the VCPU virtual TLB + machine TLB +
-   machine VHPT.
-*/
-
-/* Local all flush of vTLB. */
-void vcpu_flush_vtlb_all(struct vcpu *v);
-
-/* Local range flush of machine TLB only (not full VCPU virtual TLB!!!) */
-void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range);
-
-/* Global all flush of vTLB */
-void domain_flush_vtlb_all(struct domain *d);
-
-/* Global range-flush of vTLB. */
-void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range);
-
-#ifdef CONFIG_XEN_IA64_TLB_TRACK
-struct tlb_track_entry;
-void __domain_flush_vtlb_track_entry(struct domain* d,
- const struct tlb_track_entry* entry);
-/* Global entry-flush of vTLB */
-void domain_flush_vtlb_track_entry(struct domain* d,
- const struct tlb_track_entry* entry);
-#endif
-
-/* Flush vhpt and mTLB on every dirty cpus. */
-void domain_flush_tlb_vhpt(struct domain *d);
-
-/* Flush vhpt and mTLB for log-dirty mode. */
-void flush_tlb_for_log_dirty(struct domain *d);
-
-/* Flush v-tlb on cpus set in mask for current domain. */
-void flush_tlb_mask(const cpumask_t *mask);
-
-/* Flush local machine TLB. */
-void local_flush_tlb_all (void);
-
-#define tlbflush_filter(x,y) ((void)0)
-
-#endif
diff --git a/xen/include/asm-ia64/trace.h b/xen/include/asm-ia64/trace.h
deleted file mode 100644
index edef1bb099..0000000000
--- a/xen/include/asm-ia64/trace.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASM_TRACE_H__
-#define __ASM_TRACE_H__
-
-#endif /* __ASM_TRACE_H__ */
diff --git a/xen/include/asm-ia64/uaccess.h b/xen/include/asm-ia64/uaccess.h
deleted file mode 100644
index 2ececb1046..0000000000
--- a/xen/include/asm-ia64/uaccess.h
+++ /dev/null
@@ -1,296 +0,0 @@
-#ifndef _ASM_IA64_UACCESS_H
-#define _ASM_IA64_UACCESS_H
-
-/*
- * This file defines various macros to transfer memory areas across
- * the user/kernel boundary. This needs to be done carefully because
- * this code is executed in kernel mode and uses user-specified
- * addresses. Thus, we need to be careful not to let the user to
- * trick us into accessing kernel memory that would normally be
- * inaccessible. This code is also fairly performance sensitive,
- * so we want to spend as little time doing safety checks as
- * possible.
- *
- * To make matters a bit more interesting, these macros are sometimes also
- * called from within the kernel itself, in which case the address
- * validity check must be skipped. The get_fs() macro tells us what
- * to do: if get_fs()==USER_DS, checking is performed; if
- * get_fs()==KERNEL_DS, checking is bypassed.
- *
- * Note that even if the memory area specified by the user is in a
- * valid address range, it is still possible that we'll get a page
- * fault while accessing it. This is handled by filling out an
- * exception handler fixup entry for each instruction that has the
- * potential to fault. When such a fault occurs, the page fault
- * handler checks to see whether the faulting instruction has a fixup
- * associated and, if so, sets r8 to -EFAULT, clears r9 to 0, and
- * then resumes execution at the continuation point.
- *
- * Based on <asm-alpha/uaccess.h>.
- *
- * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/page-flags.h>
-#include <linux/mm.h>
-
-#include <asm/intrinsics.h>
-#include <asm/pgtable.h>
-#include <asm/io.h>
-
-#define __access_ok(addr) (!IS_VMM_ADDRESS((unsigned long)(addr)))
-#define access_ok(addr, size) (__access_ok(addr))
-#define array_access_ok(addr,count,size) (__access_ok(addr))
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * Careful to not
- * (a) re-use the arguments for side effects (sizeof/typeof is ok)
- * (b) require any knowledge of processes at this stage
- */
-#define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
-#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
-
-/*
- * The "__xxx" versions do not do address space checking, useful when
- * doing multiple accesses to the same area (the programmer has to do the
- * checks by hand with "access_ok()")
- */
-#define __put_user(x, ptr) __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
-#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
-
-extern long __put_user_unaligned_unknown (void);
-
-#define __put_user_unaligned(x, ptr) \
-({ \
- long __ret; \
- switch (sizeof(*(ptr))) { \
- case 1: __ret = __put_user((x), (ptr)); break; \
- case 2: __ret = (__put_user((x), (u8 __user *)(ptr))) \
- | (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
- case 4: __ret = (__put_user((x), (u16 __user *)(ptr))) \
- | (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
- case 8: __ret = (__put_user((x), (u32 __user *)(ptr))) \
- | (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
- default: __ret = __put_user_unaligned_unknown(); \
- } \
- __ret; \
-})
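
For instance, the 4-byte case above issues two u16 stores of (x) and (x >> 16). The hypothetical sketch below mimics that split with plain byte copies and checks that it round-trips on a little-endian target (as Xen/ia64 is):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint8_t buf[8] = { 0 };
        uint32_t x = 0x11223344, back;
        uint16_t lo = (uint16_t)x, hi = (uint16_t)(x >> 16);

        memcpy(&buf[1], &lo, 2);   /* __put_user(x, (u16 *)ptr)           */
        memcpy(&buf[3], &hi, 2);   /* __put_user(x >> 16, (u16 *)ptr + 1) */

        memcpy(&back, &buf[1], 4);
        assert(back == x);         /* the two halves reassemble the word */
        return 0;
    }
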
-
-extern long __get_user_unaligned_unknown (void);
-
-#define __get_user_unaligned(x, ptr) \
-({ \
- long __ret; \
- switch (sizeof(*(ptr))) { \
- case 1: __ret = __get_user((x), (ptr)); break; \
- case 2: __ret = (__get_user((x), (u8 __user *)(ptr))) \
- | (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
- case 4: __ret = (__get_user((x), (u16 __user *)(ptr))) \
- | (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
- case 8: __ret = (__get_user((x), (u32 __user *)(ptr))) \
- | (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
- default: __ret = __get_user_unaligned_unknown(); \
- } \
- __ret; \
-})
-
-#ifdef ASM_SUPPORTED
- struct __large_struct { unsigned long buf[100]; };
-# define __m(x) (*(struct __large_struct __user *)(x))
-
-/* We need to declare the __ex_table section before we can use it in .xdata. */
-asm (".section \"__ex_table\", \"a\"\n\t.previous");
-
-# define __get_user_size(val, addr, n, err) \
-do { \
- register long __gu_r8 asm ("r8") = 0; \
- register long __gu_r9 asm ("r9"); \
- asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
- "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n" \
- "[1:]" \
- : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8)); \
- (err) = __gu_r8; \
- (val) = __gu_r9; \
-} while (0)
-
-/*
- * The "__put_user_size()" macro tells gcc that it reads from memory instead of writing
- * to it. This is safe because it does not write to any memory gcc knows about, so there
- * are no aliasing issues.
- */
-# define __put_user_size(val, addr, n, err) \
-do { \
- register long __pu_r8 asm ("r8") = 0; \
- asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
- "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n" \
- "[1:]" \
- : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8)); \
- (err) = __pu_r8; \
-} while (0)
-
-#else /* !ASM_SUPPORTED */
-# define RELOC_TYPE 2 /* ip-rel */
-# define __get_user_size(val, addr, n, err) \
-do { \
- __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE); \
- (err) = ia64_getreg(_IA64_REG_R8); \
- (val) = ia64_getreg(_IA64_REG_R9); \
-} while (0)
-# define __put_user_size(val, addr, n, err) \
-do { \
- __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \
- (err) = ia64_getreg(_IA64_REG_R8); \
-} while (0)
-#endif /* !ASM_SUPPORTED */
-
-extern void __get_user_unknown (void);
-
-/*
- * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
- * could clobber r8 and r9 (among others). Thus, be careful not to evaluate them while
- * using r8/r9.
- */
-#define __do_get_user(check, x, ptr, size, segment) \
-({ \
- const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
- __typeof__ (size) __gu_size = (size); \
- long __gu_err = -EFAULT, __gu_val = 0; \
- \
- if (!check || __access_ok(__gu_ptr)) \
- switch (__gu_size) { \
- case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
- case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \
- case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break; \
- case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
- default: __get_user_unknown(); break; \
- } \
- (x) = (__typeof__(*(__gu_ptr))) __gu_val; \
- __gu_err; \
-})
-
-#define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size, KERNEL_DS)
-#define __get_user_check(x, ptr, size, segment) __do_get_user(1, x, ptr, size, segment)
-
-extern void __put_user_unknown (void);
-
-/*
- * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
- * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8.
- */
-#define __do_put_user(check, x, ptr, size, segment) \
-({ \
- __typeof__ (x) __pu_x = (x); \
- __typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \
- __typeof__ (size) __pu_size = (size); \
- long __pu_err = -EFAULT; \
- \
- if (!check || __access_ok(__pu_ptr)) \
- switch (__pu_size) { \
- case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \
- case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \
- case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break; \
- case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break; \
- default: __put_user_unknown(); break; \
- } \
- __pu_err; \
-})
-
-#define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size, KERNEL_DS)
-#define __put_user_check(x, ptr, size, segment) __do_put_user(1, x, ptr, size, segment)
-
-/*
- * Complex access routines
- */
-extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
- unsigned long count);
-
-static inline unsigned long
-__copy_to_user (void __user *to, const void *from, unsigned long count)
-{
- return __copy_user(to, (void __user *)from, count);
-}
-
-static inline unsigned long
-__copy_from_user (void *to, const void __user *from, unsigned long count)
-{
- return __copy_user((void __user *)to, from, count);
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-#define copy_to_user(to, from, n) \
-({ \
- void __user *__cu_to = (to); \
- const void *__cu_from = (from); \
- long __cu_len = (n); \
- \
- if (__access_ok(__cu_to)) \
- __cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len); \
- __cu_len; \
-})
-
-extern unsigned long __do_clear_user (void __user * to, unsigned long count);
-
-#define clear_user(to, n) \
-({ \
- void __user *__cu_to = (to); \
- long __cu_len = (n); \
- \
- if (__access_ok(__cu_to)) \
- __cu_len = __do_clear_user(__cu_to, __cu_len); \
- __cu_len; \
-})
-
-#define copy_from_user(to, from, n) \
-({ \
- void *__cu_to = (to); \
- const void __user *__cu_from = (from); \
- long __cu_len = (n); \
- \
- __chk_user_ptr(__cu_from); \
- if (__access_ok(__cu_from)) \
- __cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len); \
- __cu_len; \
-})
-
-#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
-
-static inline unsigned long
-copy_in_user (void __user *to, const void __user *from, unsigned long n)
-{
- if (likely(access_ok(from, n) && access_ok(to, n)))
- n = __copy_user(to, from, n);
- return n;
-}
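
Callers follow the usual "nonzero return means bytes left uncopied" convention. A hypothetical usage sketch, assuming the macros above are in scope (fetch_args and struct args are illustrative names):

    struct args { unsigned long op, val; };

    static long fetch_args(const void __user *guest_ptr, struct args *a)
    {
        /* copy_from_user() returns the number of bytes NOT copied */
        if (copy_from_user(a, guest_ptr, sizeof(*a)) != 0)
            return -EFAULT;
        return 0;
    }
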
-
-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
-
-struct exception_table_entry {
- int addr; /* location-relative address of insn this fixup is for */
- int cont; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
-};
-
-extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
-extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
-
-static inline int
-ia64_done_with_exception (struct pt_regs *regs)
-{
- const struct exception_table_entry *e;
- e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
- if (e) {
- ia64_handle_exception(regs, e);
- return 1;
- }
- return 0;
-}
-
-#endif /* _ASM_IA64_UACCESS_H */
diff --git a/xen/include/asm-ia64/vcpu.h b/xen/include/asm-ia64/vcpu.h
deleted file mode 100644
index abd2897391..0000000000
--- a/xen/include/asm-ia64/vcpu.h
+++ /dev/null
@@ -1,428 +0,0 @@
-#ifndef _XEN_IA64_VCPU_H
-#define _XEN_IA64_VCPU_H
-
-// TODO: Many (or perhaps most) of these should eventually be
-// static inline functions
-
-#include <asm/delay.h>
-#include <asm/fpu.h>
-#include <asm/tlb.h>
-#include <asm/ia64_int.h>
-#include <asm/privop_stat.h>
-#include <xen/types.h>
-#include <public/xen.h>
-#include <linux/acpi.h>
-struct vcpu;
-typedef struct vcpu VCPU;
-typedef struct cpu_user_regs REGS;
-
-extern u64 cycle_to_ns(u64 cycle);
-
-/* Note: PSCB stands for Privileged State Communication Block. */
-#define VCPU(_v,_x) (_v->arch.privregs->_x)
-#define PSCB(_v,_x) VCPU(_v,_x)
-#define PSCBX(_v,_x) (_v->arch._x)
-#define FP_PSR(_v) PSCBX(_v, fp_psr)
-
-#define SPURIOUS_VECTOR 0xf
-
-/* general registers */
-extern u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg);
-extern IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val);
-extern IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value,
- int nat);
-extern IA64FAULT vcpu_get_fpreg(VCPU * vcpu, unsigned long reg,
- struct ia64_fpreg *val);
-
-extern IA64FAULT vcpu_set_fpreg(VCPU * vcpu, unsigned long reg,
- struct ia64_fpreg *val);
-
-/* application registers */
-extern void vcpu_load_kernel_regs(VCPU * vcpu);
-extern IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val);
-/* psr */
-extern BOOLEAN vcpu_get_psr_ic(VCPU * vcpu);
-extern u64 vcpu_get_psr(VCPU * vcpu);
-extern IA64FAULT vcpu_set_psr(VCPU * vcpu, u64 val);
-extern IA64FAULT vcpu_get_psr_masked(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm);
-extern IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm);
-extern IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val);
-extern IA64FAULT vcpu_set_psr_i(VCPU * vcpu);
-extern IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu);
-extern IA64FAULT vcpu_set_psr_dt(VCPU * vcpu);
-
-/**************************************************************************
- VCPU control register access routines
-**************************************************************************/
-
-static inline IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCB(vcpu, dcr);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_get_iva(VCPU * vcpu, u64 * pval)
-{
- if (VMX_DOMAIN(vcpu))
- *pval = PSCB(vcpu, iva) & ~0x7fffL;
- else
- *pval = PSCBX(vcpu, iva) & ~0x7fffL;
-
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_get_pta(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCB(vcpu, pta);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCB(vcpu, ipsr);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCB(vcpu, isr);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCB(vcpu, iip);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval)
-{
- PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_get_ifa);
- *pval = PSCB(vcpu, ifa);
- return IA64_NO_FAULT;
-}
-
-static inline unsigned long vcpu_get_rr_ps(VCPU * vcpu, u64 vadr)
-{
- ia64_rr rr;
-
- rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
- return rr.ps;
-}
-
-static inline unsigned long vcpu_get_rr_rid(VCPU * vcpu, u64 vadr)
-{
- ia64_rr rr;
-
- rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
- return rr.rid;
-}
-
-static inline unsigned long vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa)
-{
- ia64_rr rr;
-
- rr.rrval = 0;
- rr.ps = vcpu_get_rr_ps(vcpu, ifa);
- rr.rid = vcpu_get_rr_rid(vcpu, ifa);
- return rr.rrval;
-}
-
-static inline IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval)
-{
- u64 val = PSCB(vcpu, itir);
- *pval = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_get_iipa(VCPU * vcpu, u64 * pval)
-{
- u64 val = PSCB(vcpu, iipa);
- *pval = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_get_ifs(VCPU * vcpu, u64 * pval)
-{
- *pval = PSCB(vcpu, ifs);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_get_iim(VCPU * vcpu, u64 * pval)
-{
- u64 val = PSCB(vcpu, iim);
- *pval = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_get_iha(VCPU * vcpu, u64 * pval)
-{
- PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_thash);
- *pval = PSCB(vcpu, iha);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val)
-{
- PSCB(vcpu, dcr) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_iva(VCPU * vcpu, u64 val)
-{
- if (VMX_DOMAIN(vcpu))
- PSCB(vcpu, iva) = val & ~0x7fffL;
- else
- PSCBX(vcpu, iva) = val & ~0x7fffL;
-
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_pta(VCPU * vcpu, u64 val)
-{
- if (val & (0x3f << 9)) /* reserved fields */
- return IA64_RSVDREG_FAULT;
- if (val & 2) /* reserved fields */
- return IA64_RSVDREG_FAULT;
- PSCB(vcpu, pta) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_ipsr(VCPU * vcpu, u64 val)
-{
- PSCB(vcpu, ipsr) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_isr(VCPU * vcpu, u64 val)
-{
- PSCB(vcpu, isr) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_iip(VCPU * vcpu, u64 val)
-{
- PSCB(vcpu, iip) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_increment_iip(VCPU * vcpu)
-{
- REGS *regs = vcpu_regs(vcpu);
- regs_increment_iip(regs);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_decrement_iip(VCPU * vcpu)
-{
- REGS *regs = vcpu_regs(vcpu);
- struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
-
- if (ipsr->ri == 0) {
- ipsr->ri = 2;
- regs->cr_iip -= 16;
- } else
- ipsr->ri--;
-
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_ifa(VCPU * vcpu, u64 val)
-{
- PSCB(vcpu, ifa) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_itir(VCPU * vcpu, u64 val)
-{
- PSCB(vcpu, itir) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_iipa(VCPU * vcpu, u64 val)
-{
- PSCB(vcpu, iipa) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_ifs(VCPU * vcpu, u64 val)
-{
- PSCB(vcpu, ifs) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_iim(VCPU * vcpu, u64 val)
-{
- PSCB(vcpu, iim) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vcpu_set_iha(VCPU * vcpu, u64 val)
-{
- PSCB(vcpu, iha) = val;
- return IA64_NO_FAULT;
-}
-
-/* control registers */
-extern IA64FAULT vcpu_set_itm(VCPU * vcpu, u64 val);
-extern IA64FAULT vcpu_set_lid(VCPU * vcpu, u64 val);
-extern IA64FAULT vcpu_set_tpr(VCPU * vcpu, u64 val);
-extern IA64FAULT vcpu_set_eoi(VCPU * vcpu, u64 val);
-extern IA64FAULT vcpu_set_lrr0(VCPU * vcpu, u64 val);
-extern IA64FAULT vcpu_set_lrr1(VCPU * vcpu, u64 val);
-extern IA64FAULT vcpu_get_itm(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_lid(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_tpr(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_irr0(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_irr1(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_irr2(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_irr3(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_lrr0(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_lrr1(VCPU * vcpu, u64 * pval);
-/* interrupt registers */
-extern void vcpu_pend_unspecified_interrupt(VCPU * vcpu);
-extern u64 vcpu_check_pending_interrupts(VCPU * vcpu);
-extern IA64FAULT vcpu_get_itv(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_pmv(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_cmcv(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_get_ivr(VCPU * vcpu, u64 * pval);
-extern IA64FAULT vcpu_set_itv(VCPU * vcpu, u64 val);
-extern IA64FAULT vcpu_set_pmv(VCPU * vcpu, u64 val);
-extern IA64FAULT vcpu_set_cmcv(VCPU * vcpu, u64 val);
-/* interval timer registers */
-extern IA64FAULT vcpu_set_itc(VCPU * vcpu, u64 val);
-extern u64 vcpu_timer_pending_early(VCPU * vcpu);
-/* debug breakpoint registers */
-extern IA64FAULT vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval);
-extern IA64FAULT vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval);
-/* performance monitor registers */
-extern IA64FAULT vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval);
-extern IA64FAULT vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval);
-/* banked general registers */
-extern IA64FAULT vcpu_bsw0(VCPU * vcpu);
-extern IA64FAULT vcpu_bsw1(VCPU * vcpu);
-/* region registers */
-extern IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval);
-extern IA64FAULT vcpu_get_rr_ve(VCPU * vcpu, u64 vadr);
-extern IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcpu, u64 val0, u64 val1,
- u64 val2, u64 val3, u64 val4);
-/* protection key registers */
-extern void vcpu_pkr_load_regs(VCPU * vcpu);
-static inline int vcpu_pkr_in_use(VCPU * vcpu)
-{
- return (PSCBX(vcpu, pkr_flags) & XEN_IA64_PKR_IN_USE);
-}
-static inline void vcpu_pkr_use_set(VCPU * vcpu)
-{
- PSCBX(vcpu, pkr_flags) |= XEN_IA64_PKR_IN_USE;
-}
-static inline void vcpu_pkr_use_unset(VCPU * vcpu)
-{
- PSCBX(vcpu, pkr_flags) &= ~XEN_IA64_PKR_IN_USE;
-}
-extern IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
-extern IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
-/* TLB */
-static inline void vcpu_purge_tr_entry(TR_ENTRY * trp)
-{
- trp->pte.val = 0;
-}
-extern IA64FAULT vcpu_itr_d(VCPU * vcpu, u64 slot, u64 padr, u64 itir, u64 ifa);
-extern IA64FAULT vcpu_itr_i(VCPU * vcpu, u64 slot, u64 padr, u64 itir, u64 ifa);
-extern IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 padr, u64 itir, u64 ifa);
-extern IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 padr, u64 itir, u64 ifa);
-extern IA64FAULT vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 log_range);
-extern IA64FAULT vcpu_ptc_e(VCPU * vcpu, u64 vadr);
-extern IA64FAULT vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 addr_range);
-extern IA64FAULT vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 addr_range);
-extern IA64FAULT vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 log_range);
-extern IA64FAULT vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 log_range);
-union U_IA64_BUNDLE;
-extern int vcpu_get_domain_bundle(VCPU * vcpu, REGS * regs, u64 gip,
- union U_IA64_BUNDLE *bundle);
-extern IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
- u64 * pteval, u64 * itir, u64 * iha);
-extern IA64FAULT vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
-extern IA64FAULT vcpu_force_inst_miss(VCPU * vcpu, u64 ifa);
-extern IA64FAULT vcpu_force_data_miss(VCPU * vcpu, u64 ifa);
-extern IA64FAULT vcpu_fc(VCPU * vcpu, u64 vadr);
-/* misc */
-extern IA64FAULT vcpu_rfi(VCPU * vcpu);
-extern IA64FAULT vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval);
-extern IA64FAULT vcpu_cover(VCPU * vcpu);
-extern IA64FAULT vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * padr);
-extern IA64FAULT vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval);
-
-extern void vcpu_pend_interrupt(VCPU * vcpu, u64 vector);
-extern void vcpu_pend_timer(VCPU * vcpu);
-extern void vcpu_poke_timer(VCPU * vcpu);
-extern void vcpu_set_next_timer(VCPU * vcpu);
-extern BOOLEAN vcpu_timer_expired(VCPU * vcpu);
-extern u64 vcpu_deliverable_interrupts(VCPU * vcpu);
-struct p2m_entry;
-extern void vcpu_itc_no_srlz(VCPU * vcpu, u64, u64, u64, u64, u64,
- struct p2m_entry *);
-extern u64 vcpu_get_tmp(VCPU *, u64);
-extern void vcpu_set_tmp(VCPU *, u64, u64);
-
-extern IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 slot,
- u64 pte, u64 itir, u64 ifa, u64 rid);
-extern IA64FAULT vcpu_set_itr(VCPU * vcpu, u64 slot,
- u64 pte, u64 itir, u64 ifa, u64 rid);
-
-/* Initialize vcpu regs. */
-extern void vcpu_init_regs(struct vcpu *v);
-
-static inline u64 itir_ps(u64 itir)
-{
- return ((itir >> 2) & 0x3f);
-}
-
-static inline u64 itir_mask(u64 itir)
-{
- return (~((1UL << itir_ps(itir)) - 1));
-}
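
A fragment checking the two helpers above against a hypothetical 16KB mapping (ps = 14, stored at bits 7:2 of the itir):

    u64 itir = 14UL << 2;

    assert(itir_ps(itir) == 14);
    assert(itir_mask(itir) == ~((1UL << 14) - 1));  /* clears the low 14 bits */
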
-
-static inline s64 vcpu_get_next_timer_ns(VCPU * vcpu)
-{
- s64 vcpu_get_next_timer_ns;
- u64 d = PSCBX(vcpu, domain_itm);
- u64 now = ia64_get_itc();
-
- if (d > now)
- vcpu_get_next_timer_ns = cycle_to_ns(d - now) + NOW();
- else
- vcpu_get_next_timer_ns =
- cycle_to_ns(local_cpu_data->itm_delta) + NOW();
-
- return vcpu_get_next_timer_ns;
-}
-
-static inline u64 vcpu_pl_adjust(u64 reg, u64 shift)
-{
- u64 pl;
-
- pl = reg & (3UL << shift);
- if (pl < ((u64)CONFIG_CPL0_EMUL << shift))
- pl = (u64)CONFIG_CPL0_EMUL << shift;
- return (reg & ~(3UL << shift)) | pl;
-}
-
-#define verbose(a...) do {if (vcpu_verbose) printk(a);} while(0)
-
-//#define vcpu_quick_region_check(_tr_regions,_ifa) 1
-#define vcpu_quick_region_check(_tr_regions,_ifa) \
- (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
-#define vcpu_quick_region_set(_tr_regions,_ifa) \
- do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
-
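
The trick above: the region number is just the top three address bits (63:61), so one byte of mask covers all eight regions. A fragment with hypothetical values:

    unsigned long tr_regions = 0;
    unsigned long ifa = 0xe000000000001000UL;      /* region 7 address */

    vcpu_quick_region_set(tr_regions, ifa);        /* sets bit 7 */
    assert(vcpu_quick_region_check(tr_regions, ifa));
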
-#endif
diff --git a/xen/include/asm-ia64/vcpumask.h b/xen/include/asm-ia64/vcpumask.h
deleted file mode 100644
index fbd4cd1adc..0000000000
--- a/xen/include/asm-ia64/vcpumask.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef __XEN_VCPUMASK_H
-#define __XEN_VCPUMASK_H
-
-/* vcpu mask
- stolen from cpumask.h */
-typedef struct { DECLARE_BITMAP(bits, MAX_VIRT_CPUS); } vcpumask_t;
-
-#define vcpu_set(vcpu, dst) __vcpu_set((vcpu), &(dst))
-static inline void __vcpu_set(int vcpu, volatile vcpumask_t *dstp)
-{
- set_bit(vcpu, dstp->bits);
-}
-#define vcpus_clear(dst) __vcpus_clear(&(dst), MAX_VIRT_CPUS)
-static inline void __vcpus_clear(vcpumask_t *dstp, int nbits)
-{
- bitmap_zero(dstp->bits, nbits);
-}
-/* No static inline type checking - see Subtlety (1) in cpumask.h. */
-#define vcpu_isset(vcpu, vcpumask) test_bit((vcpu), (vcpumask).bits)
-
-#define first_vcpu(src) __first_vcpu(&(src), MAX_VIRT_CPUS)
-static inline int __first_vcpu(const vcpumask_t *srcp, int nbits)
-{
- return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
-}
-
-#define next_vcpu(n, src) __next_vcpu((n), &(src), MAX_VIRT_CPUS)
-static inline int __next_vcpu(int n, const vcpumask_t *srcp, int nbits)
-{
- return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
-}
-
-#if MAX_VIRT_CPUS > 1
-#define for_each_vcpu_mask(d, vcpu, mask) \
- for ((vcpu) = first_vcpu(mask); \
- (vcpu) < d->max_vcpus; \
- (vcpu) = next_vcpu((vcpu), (mask)))
-#else /* MAX_VIRT_CPUS == 1 */
-#define for_each_vcpu_mask(d, vcpu, mask) for ((vcpu) = 0; (vcpu) < 1; (vcpu)++)
-#endif /* MAX_VIRT_CPUS */
-
-#define vcpumask_scnprintf(buf, len, src) \
- __vcpumask_scnprintf((buf), (len), &(src), MAX_VIRT_CPUS)
-static inline int __vcpumask_scnprintf(char *buf, int len,
- const vcpumask_t *srcp, int nbits)
-{
- return bitmap_scnprintf(buf, len, srcp->bits, nbits);
-}
-
-#endif /* __XEN_VCPUMASK_H */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-ia64/vhpt.h b/xen/include/asm-ia64/vhpt.h
deleted file mode 100644
index e5f148ecd1..0000000000
--- a/xen/include/asm-ia64/vhpt.h
+++ /dev/null
@@ -1,107 +0,0 @@
-#ifndef ASM_VHPT_H
-#define ASM_VHPT_H
-
-/* Size of the VHPT. */
-// XXX workaround to avoid triggering xenLinux software lockup detection.
-# define VHPT_SIZE_LOG2 16 // 64KB
-
-/* Number of entries in the VHPT. The size of an entry is 4*8B == 32B */
-#define VHPT_NUM_ENTRIES (1 << (VHPT_SIZE_LOG2 - 5))
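
With 32-byte long-format entries that gives 64KB / 32B == 2048 entries; a trivial sanity check, assuming the two macros above:

    assert(VHPT_NUM_ENTRIES == (1 << VHPT_SIZE_LOG2) / 32);   /* 2048 */
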
-
-// FIXME: These should be automatically generated
-#define VLE_PGFLAGS_OFFSET 0
-#define VLE_ITIR_OFFSET 8
-#define VLE_TITAG_OFFSET 16
-#define VLE_CCHAIN_OFFSET 24
-
-#ifndef __ASSEMBLY__
-#include <xen/percpu.h>
-#include <asm/vcpumask.h>
-
-extern void domain_purge_swtc_entries(struct domain *d);
-extern void domain_purge_swtc_entries_vcpu_dirty_mask(struct domain* d, vcpumask_t vcpu_dirty_mask);
-
-//
-// VHPT Long Format Entry (as recognized by hw)
-//
-struct vhpt_lf_entry {
- unsigned long page_flags;
- unsigned long itir;
- unsigned long ti_tag;
- unsigned long CChain;
-};
-
-#define INVALID_TI_TAG 0x8000000000000000L
-
-extern void vhpt_init (void);
-extern void gather_vhpt_stats(void);
-extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
- unsigned long itir);
-extern void vhpt_insert (unsigned long vadr, unsigned long pte,
- unsigned long itir);
-void local_vhpt_flush(void);
-extern void vcpu_vhpt_flush(struct vcpu* v);
-
-/* Currently the VHPT is allocated per CPU. */
-DECLARE_PER_CPU (unsigned long, vhpt_paddr);
-DECLARE_PER_CPU (unsigned long, vhpt_pend);
-
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
-#if !VHPT_ENABLED
-#error "VHPT_ENABLED must be set for CONFIG_XEN_IA64_PERVCPU_VHPT"
-#endif
-#endif
-
-#include <xen/sched.h>
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
-void domain_set_vhpt_size(struct domain *d, int8_t vhpt_size_log2);
-int pervcpu_vhpt_alloc(struct vcpu *v);
-void pervcpu_vhpt_free(struct vcpu *v);
-#else
-#define domain_set_vhpt_size(d, vhpt_size_log2) do { } while (0)
-#define pervcpu_vhpt_alloc(v) (0)
-#define pervcpu_vhpt_free(v) do { } while (0)
-#endif
-
-static inline unsigned long
-vcpu_vhpt_maddr(struct vcpu* v)
-{
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
- if (HAS_PERVCPU_VHPT(v->domain))
- return v->arch.vhpt_maddr;
-#endif
-
-#if 0
-    // referencing v->processor is racy.
- return per_cpu(vhpt_paddr, v->processor);
-#endif
- BUG_ON(v != current);
- return __get_cpu_var(vhpt_paddr);
-}
-
-static inline unsigned long
-vcpu_pta(struct vcpu* v)
-{
-#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
- if (HAS_PERVCPU_VHPT(v->domain))
- return v->arch.pta.val;
-#endif
- return __va_ul(__get_cpu_var(vhpt_paddr)) | (1 << 8) |
- (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED;
-}
-
-static inline int
-canonicalize_vhpt_size(int sz)
-{
- /* minimum 32KB */
- if (sz < 15)
- return 15;
- /* maximum 8MB (since purging TR is hard coded) */
- if (sz > IA64_GRANULE_SHIFT - 1)
- return IA64_GRANULE_SHIFT - 1;
- return sz;
-}
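
Assuming IA64_GRANULE_SHIFT == 24 (the 16MB kernel granule), the clamp above keeps log2 sizes in [15, 23], i.e. 32KB to 8MB:

    assert(canonicalize_vhpt_size(10) == 15);   /* below minimum -> 32KB */
    assert(canonicalize_vhpt_size(20) == 20);   /* in range, unchanged   */
    assert(canonicalize_vhpt_size(30) == 23);   /* above maximum -> 8MB  */
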
-
-
-#endif /* !__ASSEMBLY */
-#endif
diff --git a/xen/include/asm-ia64/viosapic.h b/xen/include/asm-ia64/viosapic.h
deleted file mode 100644
index 7004e6e5b9..0000000000
--- a/xen/include/asm-ia64/viosapic.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- *
- * Copyright (C) 2001 MandrakeSoft S.A.
- *
- * MandrakeSoft S.A.
- * 43, rue d'Aboukir
- * 75002 Paris - France
- * http://www.linux-mandrake.com/
- * http://www.mandrakesoft.com/
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ASM_IA64_VMX_VIOSAPIC_H__
-#define __ASM_IA64_VMX_VIOSAPIC_H__
-
-#include <xen/config.h>
-#include <xen/types.h>
-#include <xen/smp.h>
-#include <public/arch-ia64/hvm/save.h> /* for VIOSAPIC_NUM_PINS and
- union viosapic_rte */
-
-/* Direct registers. */
-#define VIOSAPIC_REG_SELECT 0x00
-#define VIOSAPIC_WINDOW 0x10
-#define VIOSAPIC_EOI 0x40
-
-#define VIOSAPIC_VERSION 0x1
-
-#define VIOSAPIC_DEST_SHIFT 16
-
-
-#define VIOSAPIC_VERSION_ID 0x21 /* IOSAPIC version */
-
-#define VIOSAPIC_DEFAULT_BASE_ADDRESS 0xfec00000
-#define VIOSAPIC_MEM_LENGTH 0x100
-
-#define domain_viosapic(d) (&(d)->arch.hvm_domain.viosapic)
-#define viosapic_domain(v) (container_of((v), struct domain, \
- arch.hvm_domain.viosapic))
-#define vcpu_viosapic(v) (&(v)->domain->arch.hvm_domain.viosapic)
-
-struct viosapic {
- uint64_t irr;
- uint64_t isr; /* This is used for level trigger */
- uint32_t ioregsel;
- spinlock_t lock;
- struct vcpu * lowest_vcpu;
- uint64_t base_address;
- union vioapic_redir_entry redirtbl[VIOSAPIC_NUM_PINS];
-};
-
-void viosapic_init(struct domain *d);
-void viosapic_set_irq(struct domain *d, int irq, int level);
-void viosapic_set_pci_irq(struct domain *d, int device, int intx, int level);
-void viosapic_write(struct vcpu *v, unsigned long addr,
- unsigned long length, unsigned long val);
-
-unsigned long viosapic_read(struct vcpu *v, unsigned long addr,
- unsigned long length);
-void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
- union vioapic_redir_entry *ent);
-
-#endif /* __ASM_IA64_VMX_VIOSAPIC_H__ */
diff --git a/xen/include/asm-ia64/virt_event.h b/xen/include/asm-ia64/virt_event.h
deleted file mode 100644
index d0b66afd7e..0000000000
--- a/xen/include/asm-ia64/virt_event.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef __VIRT_EVENT_H__
-#define __VIRT_EVENT_H__
-
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * virt_event.h:
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Shaofan Li (Susie Li) (susie.li@intel.com)
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- */
-
-
-#define EVENT_MOV_TO_AR 1
-#define EVENT_MOV_TO_AR_IMM 2
-#define EVENT_MOV_FROM_AR 3
-#define EVENT_MOV_TO_CR 4
-#define EVENT_MOV_FROM_CR 5
-#define EVENT_MOV_TO_PSR 6
-#define EVENT_MOV_FROM_PSR 7
-#define EVENT_ITC_D 8
-#define EVENT_ITC_I 9
-#define EVENT_MOV_TO_RR 10
-#define EVENT_MOV_TO_DBR 11
-#define EVENT_MOV_TO_IBR 12
-#define EVENT_MOV_TO_PKR 13
-#define EVENT_MOV_TO_PMC 14
-#define EVENT_MOV_TO_PMD 15
-#define EVENT_ITR_D 16
-#define EVENT_ITR_I 17
-#define EVENT_MOV_FROM_RR 18
-#define EVENT_MOV_FROM_DBR 19
-#define EVENT_MOV_FROM_IBR 20
-#define EVENT_MOV_FROM_PKR 21
-#define EVENT_MOV_FROM_PMC 22
-#define EVENT_MOV_FROM_CPUID 23
-#define EVENT_SSM 24
-#define EVENT_RSM 25
-#define EVENT_PTC_L 26
-#define EVENT_PTC_G 27
-#define EVENT_PTC_GA 28
-#define EVENT_PTR_D 29
-#define EVENT_PTR_I 30
-#define EVENT_THASH 31
-#define EVENT_TTAG 32
-#define EVENT_TPA 33
-#define EVENT_TAK 34
-#define EVENT_PTC_E 35
-#define EVENT_COVER 36
-#define EVENT_RFI 37
-#define EVENT_BSW_0 38
-#define EVENT_BSW_1 39
-#define EVENT_VMSW 40
-
-#if 0
-/* VMAL 1.0 */
-#define EVENT_MOV_TO_AR 1
-#define EVENT_MOV_TO_AR_IMM 2
-#define EVENT_MOV_FROM_AR 3
-#define EVENT_MOV_TO_CR 4
-#define EVENT_MOV_FROM_CR 5
-#define EVENT_MOV_TO_PSR 6
-#define EVENT_MOV_FROM_PSR 7
-#define EVENT_ITC_D 8
-#define EVENT_ITC_I 9
-#define EVENT_MOV_TO_RR 10
-#define EVENT_MOV_TO_DBR 11
-#define EVENT_MOV_TO_IBR 12
-#define EVENT_MOV_TO_PKR 13
-#define EVENT_MOV_TO_PMC 14
-#define EVENT_MOV_TO_PMD 15
-#define EVENT_ITR_D 16
-#define EVENT_ITR_I 17
-#define EVENT_MOV_FROM_RR 18
-#define EVENT_MOV_FROM_DBR 19
-#define EVENT_MOV_FROM_IBR 20
-#define EVENT_MOV_FROM_PKR 21
-#define EVENT_MOV_FROM_PMC 22
-#define EVENT_MOV_FROM_PMD 23
-#define EVENT_MOV_FROM_CPUID 24
-#define EVENT_SSM 25
-#define EVENT_RSM 26
-#define EVENT_PTC_L 27
-#define EVENT_PTC_G 28
-#define EVENT_PTC_GA 29
-#define EVENT_PTR_D 30
-#define EVENT_PTR_I 31
-#define EVENT_THASH 32
-#define EVENT_TTAG 33
-#define EVENT_TPA 34
-#define EVENT_TAK 35
-#define EVENT_PTC_E 36
-#define EVENT_COVER 37
-#define EVENT_RFI 38
-#define EVENT_BSW_0 39
-#define EVENT_BSW_1 40
-#define EVENT_VMSW 41
-
-
-#endif /* VMAL 1.0 */
-#endif /* __VIRT_EVENT_H__ */
diff --git a/xen/include/asm-ia64/vlsapic.h b/xen/include/asm-ia64/vlsapic.h
deleted file mode 100644
index 8eef87f397..0000000000
--- a/xen/include/asm-ia64/vlsapic.h
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- *
- */
-
-#ifndef _LSAPIC_H
-#define _LSAPIC_H
-#include <xen/sched.h>
-#include <asm/vmx_vcpu.h>
-/*
- * Delivery mode
- */
-#define SAPIC_DELIV_SHIFT 8
-#define SAPIC_FIXED 0x0
-#define SAPIC_LOWEST_PRIORITY 0x1
-#define SAPIC_PMI 0x2
-#define SAPIC_NMI 0x4
-#define SAPIC_INIT 0x5
-#define SAPIC_EXTINT 0x7
-
-/*
- * Interrupt polarity
- */
-#define SAPIC_POLARITY_SHIFT 13
-#define SAPIC_POL_HIGH 0
-#define SAPIC_POL_LOW 1
-
-/*
- * Trigger mode
- */
-#define SAPIC_TRIGGER_SHIFT 15
-#define SAPIC_EDGE 0
-#define SAPIC_LEVEL 1
-
-/*
- * LSAPIC OFFSET
- */
-#define PIB_LOW_HALF(ofst) !(ofst & (1 << 20))
-#define PIB_OFST_INTA 0x1E0000
-#define PIB_OFST_XTP 0x1E0008
-
-/*
- * Mask bit
- */
-#define SAPIC_MASK_SHIFT 16
-#define SAPIC_MASK (1 << SAPIC_MASK_SHIFT)
-
-#define VLSAPIC_XTP(_v) VMX(_v, xtp)
-
-extern void vtm_init(struct vcpu *vcpu);
-extern void vtm_set_itc(struct vcpu *vcpu, uint64_t new_itc);
-extern void vtm_set_itm(struct vcpu *vcpu, uint64_t val);
-extern void vtm_set_itv(struct vcpu *vcpu, uint64_t val);
-extern void vmx_vexirq(struct vcpu *vcpu);
-extern void vhpi_detection(struct vcpu *vcpu);
-extern int vlsapic_deliver_int(struct domain *d,
- uint16_t dest, uint64_t dm, uint64_t vector);
-
-extern uint64_t vlsapic_read(struct vcpu *v, uint64_t addr, uint64_t s);
-extern void vlsapic_write(struct vcpu *v, uint64_t addr, uint64_t s, uint64_t val);
-#endif
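
A short sketch of how the SAPIC shift constants above pack a vector, delivery mode, polarity, trigger mode and mask bit into one interrupt word; the packing helper is illustrative only, consult the Itanium SDM for the authoritative layout:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SAPIC_DELIV_SHIFT    8
    #define SAPIC_POLARITY_SHIFT 13
    #define SAPIC_TRIGGER_SHIFT  15
    #define SAPIC_MASK_SHIFT     16

    /* Pack the fields the header's shift constants describe into one word. */
    static uint64_t sapic_entry(unsigned vector, uint64_t dm,
                                unsigned pol, unsigned trig, unsigned masked)
    {
        return (uint64_t)vector
             | (dm << SAPIC_DELIV_SHIFT)
             | ((uint64_t)pol << SAPIC_POLARITY_SHIFT)
             | ((uint64_t)trig << SAPIC_TRIGGER_SHIFT)
             | ((uint64_t)masked << SAPIC_MASK_SHIFT);
    }

    int main(void)
    {
        /* SAPIC_FIXED delivery, low polarity, level trigger, unmasked. */
        printf("entry = %#" PRIx64 "\n", sapic_entry(0x31, 0x0, 1, 1, 0));
        return 0;
    }
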
diff --git a/xen/include/asm-ia64/vmmu.h b/xen/include/asm-ia64/vmmu.h
deleted file mode 100644
index 7f950112ef..0000000000
--- a/xen/include/asm-ia64/vmmu.h
+++ /dev/null
@@ -1,223 +0,0 @@
-
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmmu.h: virtual memory management unit related APIs and data structure.
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- */
-
-#ifndef XEN_TLBthash_H
-#define XEN_TLBthash_H
-
-#define MAX_CCN_DEPTH (15) // collision chain depth
-#define DEFAULT_VTLB_SZ (14) // 16K hash + 16K c-chain for VTLB
-#define DEFAULT_VHPT_SZ (23) // 8M hash + 8M c-chain for VHPT
-#define VTLB(v,_x) (v->arch.vtlb._x)
-#define VHPT(v,_x) (v->arch.vhpt._x)
-
-#ifndef __ASSEMBLY__
-
-#include <xen/config.h>
-#include <xen/types.h>
-#include <public/xen.h>
-#include <asm/tlb.h>
-#include <asm/regionreg.h>
-#include <asm/vmx_mm_def.h>
-#include <asm/bundle.h>
-
-enum {
- ISIDE_TLB=0,
- DSIDE_TLB=1
-};
-#endif /* __ASSEMBLY__ */
-
-#define VTLB_PTE_P_BIT 0
-#define VTLB_PTE_P (1UL<<VTLB_PTE_P_BIT)
-
-#define ITIR_RV_MASK (((1UL<<32)-1)<<32 | 0x3)
-#define PAGE_FLAGS_RV_MASK (0x2 | (0x3UL<<50)|(((1UL<<11)-1)<<53))
-#define PAGE_FLAGS_AR_PL_MASK ((0x7UL<<9)|(0x3UL<<7))
-
-#ifndef __ASSEMBLY__
-typedef struct thash_data {
- union {
- struct {
- u64 p : 1; // 0
- u64 rv1 : 1; // 1
- u64 ma : 3; // 2-4
- u64 a : 1; // 5
- u64 d : 1; // 6
- u64 pl : 2; // 7-8
- u64 ar : 3; // 9-11
- u64 ppn : 38; // 12-49
- u64 rv2 : 2; // 50-51
- u64 ed : 1; // 52
- u64 ig1 : 3; // 53-63
- };
- u64 page_flags;
- }; // same for VHPT and TLB
-
- union {
- struct {
- u64 rv3 : 2; // 0-1
- u64 ps : 6; // 2-7
- u64 key : 24; // 8-31
- u64 rv4 : 32; // 32-63
- };
- u64 itir;
- };
- union {
- struct { // For TLB
- u64 ig2 : 12; // 0-11
- u64 vpn : 49; // 12-60
- u64 vrn : 3; // 61-63
- };
- u64 vadr;
- u64 ifa;
- struct { // For VHPT
- u64 tag : 63; // 0-62
- u64 ti : 1; // 63, invalid entry for VHPT
- };
- u64 etag; // extended tag for VHPT
- };
- union {
- struct thash_data *next;
- u64 rid; // only used in guest TR
-// u64 tr_idx;
- };
-} thash_data_t;
-
-#define INVALIDATE_VHPT_HEADER(hdata) \
-{ ((hdata)->page_flags)=0; \
- ((hdata)->itir)=PAGE_SHIFT<<2; \
- ((hdata)->etag)=1UL<<63; \
- ((hdata)->next)=0;}
-
-#define INVALIDATE_TLB_HEADER(hash) INVALIDATE_VHPT_HEADER(hash)
-
-#define INVALIDATE_HASH_HEADER(hcb,hash) INVALIDATE_VHPT_HEADER(hash)
-
-#define INVALID_VHPT(hdata) ((hdata)->ti)
-#define INVALID_TLB(hdata) ((hdata)->ti)
-#define INVALID_TR(hdata) (!(hdata)->p)
-#define INVALID_ENTRY(hcb, hdata) INVALID_VHPT(hdata)
-
-static inline u64 thash_translate(thash_data_t *hdata, u64 vadr)
-{
- int ps = hdata->ps;
- return (hdata->ppn >> (ps - 12) << ps) | (vadr & ((1UL << ps) - 1));
-}
-
-typedef struct thash_cb {
- /* THASH base information */
- thash_data_t *hash; // hash table pointer, aligned at thash_sz.
- u64 hash_sz; // size of above data.
- void *cch_buf; // base address of collision chain.
- u64 cch_sz; // size of above data.
- u64 cch_free_idx; // index of free entry.
- thash_data_t *cch_freelist;
- PTA pta;
-} thash_cb_t;
-
-/*
- * Allocate and initialize internal control data before service.
- */
-extern int thash_alloc(thash_cb_t *hcb, u64 sz, char *what);
-
-extern void thash_free(thash_cb_t *hcb);
-
-/*
- * Insert an entry into the hash table.
- * NOTES:
- * 1: A TLB entry may be a TR, TC or Foreign Map. For a TR entry,
- * itr[]/dtr[] need to be updated too.
- * 2: Inserting into the collision chain may trigger recycling if
- * the collision chain buffer has no free entry.
- * 3: The new entry is inserted at the head of the collision chain
- * in the hash table.
- * 4: Returns the entry in the hash table or collision chain.
- *
- */
-//extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
-//extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
-extern int vtr_find_overlap(struct vcpu *vcpu, u64 va, u64 ps, int is_data);
-
-/*
- * Find and purge overlapping entries in the hash table and its collision chain.
- * PARAMS:
- * 1: in: TLB-format entry; rid:ps must match vrr[].
- * rid, va & ps identify the address space to purge.
- * 2: section can be any combination of TR, TC and FM (thash_SECTION_XX).
- * 3: cl selects the I side or the D side.
- * NOTES:
- *
- */
-extern void thash_purge_entries(struct vcpu *v, u64 va, u64 ps);
-extern void thash_purge_entries_remote(struct vcpu *v, u64 va, u64 ps);
-extern int thash_purge_and_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa, int type);
-
-/*
- * Purge all TCs or VHPT entries including those in Hash table.
- *
- */
-extern void thash_purge_all(struct vcpu *v);
-extern void vmx_vcpu_flush_vtlb_all(struct vcpu *v);
-
-/*
- * Lookup the hash table and its collision chain to find an entry
- * covering this address rid:va.
- *
- */
-extern thash_data_t *vtlb_lookup(struct vcpu *v,u64 va,int is_data);
-
-
-extern int init_domain_tlb(struct vcpu *v);
-extern void free_domain_tlb(struct vcpu *v);
-extern thash_data_t * vhpt_lookup(u64 va);
-extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
-extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma, u64 iot);
-extern void emulate_io_update(struct vcpu *vcpu, u64 word, u64 d, u64 d1);
-extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
-extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
- int type);
-extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
-extern int vhpt_access_rights_fixup(struct vcpu *v, u64 ifa, int is_data);
-
-/*
- * Purge the machine TLB.
- * INPUT
- * va: only bits 0:60 are valid.
- * ps: log2 of the page size; (1<<ps) gives the address range to purge.
- *
- */
-static inline void machine_tlb_purge(u64 va, u64 ps)
-{
- ia64_ptcl(va, ps << 2);
-}
-
-static inline void vmx_vcpu_set_tr (thash_data_t *trp, u64 pte, u64 itir, u64 va, u64 rid)
-{
- trp->page_flags = pte;
- trp->itir = itir;
- trp->vadr = va;
- trp->rid = rid;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* XEN_TLBthash_H */
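
The deleted thash_translate() above is the core of guest physical address formation. As a quick illustration, the same arithmetic as a standalone program (values invented, not Xen code): ppn counts 4 KB frames, so for a 2^ps page the low (ps-12) ppn bits are dropped before the page offset from vadr is merged back in.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t translate(uint64_t ppn, unsigned ps, uint64_t vadr)
    {
        uint64_t page_base = (ppn >> (ps - 12)) << ps;    /* align ppn to 2^ps */
        uint64_t offset    = vadr & ((1ULL << ps) - 1);   /* offset within page */
        return page_base | offset;
    }

    int main(void)
    {
        /* A 16K page (ps=14): ppn 0x12345 with vadr offset 0x1abc. */
        printf("pa = %#" PRIx64 "\n", translate(0x12345, 14, 0x1abc));
        return 0;
    }
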
diff --git a/xen/include/asm-ia64/vmx.h b/xen/include/asm-ia64/vmx.h
deleted file mode 100644
index 750761aae7..0000000000
--- a/xen/include/asm-ia64/vmx.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx.h: prototypes for general VMX-related interfaces
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
- */
-
-#ifndef _ASM_IA64_VT_H
-#define _ASM_IA64_VT_H
-
-#include <public/hvm/ioreq.h>
-#include <asm/ia64_int.h>
-
-#define vmx_user_mode(regs) (((struct ia64_psr *)&(regs)->cr_ipsr)->vm == 1)
-
-#define VCPU_LID(v) (((u64)(v)->vcpu_id)<<24)
-
-extern void identify_vmx_feature(void);
-extern unsigned int vmx_enabled;
-extern void *vmx_init_env(void *start, unsigned long end_in_pa);
-extern int vmx_final_setup_guest(struct vcpu *v);
-extern void vmx_save_state(struct vcpu *v);
-extern void vmx_load_state(struct vcpu *v);
-extern int vmx_setup_platform(struct domain *d);
-extern void vmx_do_resume(struct vcpu *v);
-extern void vmx_io_assist(struct vcpu *v);
-extern IA64FAULT ia64_hypercall (struct pt_regs *regs);
-extern unsigned long __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
-extern void set_privileged_operation_isr (struct vcpu *vcpu,int inst);
-extern void set_rsv_reg_field_isr (struct vcpu *vcpu);
-extern void vmx_relinquish_guest_resources(struct domain *d);
-extern void vmx_relinquish_vcpu_resources(struct vcpu *v);
-extern void vmx_send_assist_req(struct vcpu *v);
-extern void deliver_pal_init(struct vcpu *vcpu);
-extern void vmx_pend_pal_init(struct domain *d);
-extern void vmx_lazy_load_fpu(struct vcpu *vcpu);
-
-static inline ioreq_t *get_vio(struct vcpu *v)
-{
- struct domain *d = v->domain;
- shared_iopage_t *p = (shared_iopage_t *)d->arch.vmx_platform.ioreq.va;
- ASSERT((v == current) || spin_is_locked(&d->arch.vmx_platform.ioreq.lock));
- ASSERT(d->arch.vmx_platform.ioreq.va != NULL);
- return &p->vcpu_ioreq[v->vcpu_id];
-}
-#endif /* _ASM_IA64_VT_H */
diff --git a/xen/include/asm-ia64/vmx_mm_def.h b/xen/include/asm-ia64/vmx_mm_def.h
deleted file mode 100644
index f3edd016b5..0000000000
--- a/xen/include/asm-ia64/vmx_mm_def.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_mm_def.h:
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
- */
-#ifndef _MM_DEF_H_
-#define _MM_DEF_H_
-
-
-/* VHPT size 4M */
-//#define VHPT_SIZE_PS 22
-//#define VHPT_SIZE (1 << VHPT_SIZE_PS)
-#define ARCH_PAGE_SHIFT 12
-#define ARCH_PAGE_SIZE PSIZE(ARCH_PAGE_SHIFT)
-#define MAX_PHYS_ADDR_BITS 50
-#define GUEST_IMPL_VA_MSB 59
-#define PMASK(size) (~((size) - 1))
-#define PSIZE(size) (1UL<<(size))
-//#define PAGE_SIZE_4K PSIZE(12)
-#define POFFSET(vaddr, ps) ((vaddr) & (PSIZE(ps) - 1))
-#define PPN_2_PA(ppn) ((ppn)<<12)
-#define CLEARLSB(ppn, nbits) ((((uint64_t)ppn) >> (nbits)) << (nbits))
-#define PAGEALIGN(va, ps) CLEARLSB(va, ps)
-
-#define TLB_AR_R 0
-#define TLB_AR_RX 1
-#define TLB_AR_RW 2
-#define TLB_AR_RWX 3
-#define TLB_AR_R_RW 4
-#define TLB_AR_RX_RWX 5
-#define TLB_AR_RWX_RW 6
-#define TLB_AR_XP 7
-
-#define IA64_ISR_CODE_MASK0 0xf
-#define IA64_UNIMPL_DADDR_FAULT 0x30
-#define IA64_UNIMPL_IADDR_TRAP 0x10
-#define IA64_RESERVED_REG_FAULT 0x30
-#define IA64_REG_NAT_CONSUMPTION_FAULT 0x10
-#define IA64_NAT_CONSUMPTION_FAULT 0x20
-#define IA64_PRIV_OP_FAULT 0x10
-
-#define DEFER_NONE 0
-#define DEFER_ALWAYS 0x1
-#define DEFER_DM 0x100 /* bit 8 */
-#define DEFER_DP 0X200 /* bit 9 */
-#define DEFER_DK 0x400 /* bit 10 */
-#define DEFER_DX 0x800 /* bit 11 */
-#define DEFER_DR 0x1000 /* bit 12 */
-#define DEFER_DA 0x2000 /* bit 13 */
-#define DEFER_DD 0x4000 /* bit 14 */
-
-#define ACCESS_RIGHT(a) ((a) & (ACCESS_FETCHADD - 1))
-
-#define ACCESS_READ 0x1
-#define ACCESS_WRITE 0x2
-#define ACCESS_EXECUTE 0x4
-#define ACCESS_XP0 0x8
-#define ACCESS_XP1 0x10
-#define ACCESS_XP2 0x20
-#define ACCESS_FETCHADD 0x40
-#define ACCESS_XCHG 0x80
-#define ACCESS_CMPXCHG 0x100
-
-#define ACCESS_SIZE_1 0x10000
-#define ACCESS_SIZE_2 0x20000
-#define ACCESS_SIZE_4 0x40000
-#define ACCESS_SIZE_8 0x80000
-#define ACCESS_SIZE_10 0x100000
-#define ACCESS_SIZE_16 0x200000
-
-#define STLB_TC 0
-#define STLB_TR 1
-
-#define IA64_RR_SHIFT 61
-
-#define PHYS_PAGE_SHIFT PPN_SHIFT
-
-#define STLB_SZ_SHIFT 8 // 256
-#define STLB_SIZE (1UL<<STLB_SZ_SHIFT)
-#define STLB_PPS_SHIFT 12
-#define STLB_PPS (1UL<<STLB_PPS_SHIFT)
-#define GUEST_TRNUM 8
-
-/* Virtual address memory attributes encoding */
-#define VA_MATTR_WB 0x0
-#define VA_MATTR_UC 0x4
-#define VA_MATTR_UCE 0x5
-#define VA_MATTR_WC 0x6
-#define VA_MATTR_NATPAGE 0x7
-
-#define VRN_MASK 0xe000000000000000
-#define PTA_BASE_MASK 0x3fffffffffffL
-#define PTA_BASE_SHIFT 15
-#define VHPT_OFFSET_MASK 0x7fff
-
-#define BITS_SHIFT_256MB 28
-#define SIZE_256MB (1UL<<BITS_SHIFT_256MB)
-#define TLB_GR_RV_BITS ((1UL<<1) | (3UL<<50))
-#define HPA_MAPPING_ATTRIBUTE 0x61 //ED:0;AR:0;PL:0;D:1;A:1;P:1
-#define VPN_2_VRN(vpn) ((vpn << PPN_SHIFT) >> IA64_VRN_SHIFT)
-
-#ifndef __ASSEMBLY__
-typedef enum { INSTRUCTION, DATA, REGISTER } miss_type;
-
-//typedef enum { MVHPT, STLB } vtlb_loc_type_t;
-typedef enum { DATA_REF, NA_REF, INST_REF, RSE_REF } vhpt_ref_t;
-
-static __inline__ uint64_t
-bits_v(uint64_t v, uint32_t bs, uint32_t be)
-{
- uint64_t result;
- __asm __volatile("shl %0=%1, %2;; shr.u %0=%0, %3;;"
- : "=r" (result): "r"(v), "r"(63-be), "r" (bs+63-be) );
- return result;
-}
-
-#define bits(val, bs, be) \
-({ \
- u64 ret; \
- \
- __asm __volatile("extr.u %0=%1, %2, %3" \
- : "=r" (ret): "r"(val), \
- "M" ((bs)), \
- "M" ((be) - (bs) + 1) ); \
- ret; \
-})
-
-/*
- * Note: despite the name, dep.z deposits the low (len) bits of v
- * at bit position (pos) and zeroes all other bits.
- */
-#define clearbits(v, pos, len) \
-({ \
- u64 ret; \
- \
- __asm __volatile("dep.z %0=%1, %2, %3" \
- : "=r" (ret): "r"(v), \
- "M" ((pos)), \
- "M" ((len))); \
- ret; \
- })
-#endif
-
-#endif
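
The bits()/bits_v() helpers above wrap ia64 extr.u and a shl/shr.u pair; both extract the unsigned bit field bs..be of a 64-bit value. A portable C sketch of the same operation (the bounds assumption 0 <= bs <= be <= 63 is mine):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t bits(uint64_t val, unsigned bs, unsigned be)
    {
        unsigned len = be - bs + 1;
        uint64_t mask = (len == 64) ? ~0ULL : ((1ULL << len) - 1);
        return (val >> bs) & mask;   /* same result as extr.u val, bs, len */
    }

    int main(void)
    {
        /* Pull a ps-style field (bits 2..7) out of an ITIR-like value. */
        uint64_t itir = 14ULL << 2;   /* ps = 14 */
        printf("ps = %" PRIu64 "\n", bits(itir, 2, 7));
        return 0;
    }
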
diff --git a/xen/include/asm-ia64/vmx_pal.h b/xen/include/asm-ia64/vmx_pal.h
deleted file mode 100644
index a99f723a88..0000000000
--- a/xen/include/asm-ia64/vmx_pal.h
+++ /dev/null
@@ -1,122 +0,0 @@
-#ifndef _ASM_IA64_VT_PAL_H
-#define _ASM_IA64_VT_PAL_H
-
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_pal.h: VT-I specific PAL (Processor Abstraction Layer) definitions
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- * Fred Yang (fred.yang@intel.com)
- * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
- */
-
-#include <xen/types.h>
-/* PAL PROCEDURE FOR VIRTUALIZATION */
-#define PAL_VP_CREATE 265
-/* Stacked Virt. Initializes a new VPD for the operation of
- * a new virtual processor in the virtual environment.
-*/
-#define PAL_VP_ENV_INFO 266
-/*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/
-#define PAL_VP_EXIT_ENV 267
-/*Stacked Virt. Allows a logical processor to exit a virtual environment.*/
-#define PAL_VP_INIT_ENV 268
-/*Stacked Virt. Allows a logical processor to enter a virtual environment.*/
-#define PAL_VP_REGISTER 269
-/*Stacked Virt. Register a different host IVT for the virtual processor.*/
-#define PAL_VP_RESUME 270
-/* Renamed from PAL_VP_RESUME */
-#define PAL_VP_RESTORE 270
-/*Stacked Virt. Resumes virtual processor operation on the logical processor.*/
-#define PAL_VP_SUSPEND 271
-/* Renamed from PAL_VP_SUSPEND */
-#define PAL_VP_SAVE 271
-/* Stacked Virt. Suspends operation for the specified virtual processor on
- * the logical processor.
- */
-#define PAL_VP_TERMINATE 272
-/* Stacked Virt. Terminates operation for the specified virtual processor.*/
-
-static inline s64
-ia64_pal_vp_env_info(u64 *buffer_size, u64 *vp_env_info)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0);
- *buffer_size=iprv.v0;
- *vp_env_info=iprv.v1;
- return iprv.status;
-}
-
-static inline s64
-ia64_pal_vp_exit_env(u64 iva)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0);
- return iprv.status;
-}
-
-/* config_options in pal_vp_init_env */
-#define VP_INITIALIZE 1UL
-#define VP_FR_PMC 1UL<<1
-#define VP_OPCODE 1UL<<8
-#define VP_CAUSE 1UL<<9
-#define VP_FW_ACC 1UL<<63
-/* init vp env with initializing vm_buffer */
-#define VP_INIT_ENV_INITALIZE VP_INITIALIZE|VP_FR_PMC|VP_OPCODE|VP_CAUSE|VP_FW_ACC
-/* init vp env without initializing vm_buffer */
-#define VP_INIT_ENV VP_FR_PMC|VP_OPCODE|VP_CAUSE|VP_FW_ACC
-
-static inline s64
-ia64_pal_vp_init_env (u64 config_options, u64 pbase_addr, \
- u64 vbase_addr, u64 * vsa_base)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr,\
- vbase_addr);
- *vsa_base=iprv.v0;
- return iprv.status;
-}
-
-static inline s64
-ia64_pal_vp_create (u64 *vpd, u64 *host_iva, u64* opt_handler)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
- (u64)opt_handler);
- return iprv.status;
-}
-
-static inline s64
-ia64_pal_vp_restore (u64 *vpd, u64 pal_proc_vector)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0);
- return iprv.status;
-}
-
-static inline s64
-ia64_pal_vp_save (u64 *vpd, u64 pal_proc_vector)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
- return iprv.status;
-}
-extern void pal_emul(struct vcpu *vcpu);
-extern void sal_emul(struct vcpu *vcpu);
-#define PAL_PROC_VM_BIT (1UL << 40)
-#define PAL_PROC_VMSW_BIT (1UL << 54)
-#endif /* _ASM_IA64_VT_PAL_H */
diff --git a/xen/include/asm-ia64/vmx_pal_vsa.h b/xen/include/asm-ia64/vmx_pal_vsa.h
deleted file mode 100644
index 056479939c..0000000000
--- a/xen/include/asm-ia64/vmx_pal_vsa.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- */
-
-
-
-#ifndef _PAL_VSA_H_
-#define _PAL_VSA_H_
-
-/* PAL virtualization services */
-
-#ifndef __ASSEMBLY__
-extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
- u64 arg4, u64 arg5, u64 arg6, u64 arg7);
-
-/* entry points in assembly code for calling vps services */
-
-extern char vmx_vps_sync_read;
-extern char vmx_vps_sync_write;
-extern char vmx_vps_resume_normal;
-extern char vmx_vps_resume_handler;
-
-extern u64 __vsa_base;
-#endif /* __ASSEMBLY__ */
-
-#define PAL_VPS_RESUME_NORMAL 0x0000
-#define PAL_VPS_RESUME_HANDLER 0x0400
-#define PAL_VPS_SYNC_READ 0x0800
-#define PAL_VPS_SYNC_WRITE 0x0c00
-#define PAL_VPS_SET_PENDING_INTERRUPT 0x1000
-#define PAL_VPS_THASH 0x1400
-#define PAL_VPS_TTAG 0x1800
-#define PAL_VPS_RESTORE 0x1c00
-#define PAL_VPS_SAVE 0x2000
-
-#endif /* _PAL_VSA_H_ */
-
diff --git a/xen/include/asm-ia64/vmx_phy_mode.h b/xen/include/asm-ia64/vmx_phy_mode.h
deleted file mode 100644
index 91ffbc74a5..0000000000
--- a/xen/include/asm-ia64/vmx_phy_mode.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_phy_mode.h:
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#ifndef _PHY_MODE_H_
-#define _PHY_MODE_H_
-
-/*
- * Guest physical mode is emulated by the GVMM, which actually runs
- * in virtual mode.
- *
- * Of all combinations of (it,dt,rt), only three are taken into
- * account:
- * (0,0,0): some firmware and kernel start-up code executes in this mode;
- * (1,1,1): most kernel C code executes in this mode;
- * (1,0,1): some low-level TLB miss handler code executes in this mode;
- * So far, no other combinations have been found.
- *
- * All physical addresses fall into two categories:
- * 0x0xxxxxxxxxxxxxxx, which is cacheable, and 0x8xxxxxxxxxxxxxxx, which
- * is uncacheable. These two kinds of addresses reside in regions 0 and 4
- * of virtual mode. Therefore, we load two different region IDs
- * (A, B) into RR0 and RR4, respectively, when the guest enters physical
- * mode. These two RIDs are completely different from the RIDs used in
- * virtual mode, so the aliasing between physical and virtual
- * addresses can be disambiguated by the different RIDs.
- *
- * RIDs A and B are stolen from the cpu ulm region id. In Linux, each
- * process is allocated 8 RIDs:
- * mmu_context << 3 + 0
- * mmu_context << 3 + 1
- * mmu_context << 3 + 2
- * mmu_context << 3 + 3
- * mmu_context << 3 + 4
- * mmu_context << 3 + 5
- * mmu_context << 3 + 6
- * mmu_context << 3 + 7
- * Because all processes share regions 5~7, the last 3 are left untouched.
- * So we steal "mmu_context << 3 + 5" and "mmu_context << 3 + 6" from
- * ulm and use them as RID A and RID B.
- *
- * When the guest runs in (1,0,1) mode, the instructions being accessed
- * reside in regions 5~7, not in region 0 or 4, so instructions can be
- * accessed in virtual mode without interfering with physical data access.
- *
- * When dt!=rt, "load/store" and "RSE" operations are rarely performed
- * at the same time, so there is no need to consider such a case. We treat
- * (0,1) as (0,0).
- *
- */
-
-
-#ifndef __ASSEMBLY__
-
-#include <asm/vmx_vcpu.h>
-#include <asm/regionreg.h>
-#include <asm/gcc_intrin.h>
-#include <asm/pgtable.h>
-
-#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
-
-extern void physical_mode_init(VCPU *);
-extern void switch_to_physical_rid(VCPU *);
-extern void switch_to_virtual_rid(VCPU *vcpu);
-extern void switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
-extern void switch_mm_mode_fast(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
-extern void check_mm_mode_switch(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
-extern void prepare_if_physical_mode(VCPU *vcpu);
-extern void recover_if_physical_mode(VCPU *vcpu);
-extern void vmx_init_all_rr(VCPU *vcpu);
-extern void vmx_load_all_rr(VCPU *vcpu);
-extern void physical_tlb_miss(VCPU *vcpu, u64 vadr, int type);
-
-#define VMX_MMU_MODE(v) ((v)->arch.arch_vmx.mmu_mode)
-#define is_virtual_mode(v) (VMX_MMU_MODE(v) == VMX_MMU_VIRTUAL)
-
-#endif /* __ASSEMBLY__ */
-
-#define VMX_MMU_VIRTUAL 0 /* Full virtual mode: it=dt=1 */
-#define VMX_MMU_PHY_D 1 /* Half physical: it=1,dt=0 */
-#define VMX_MMU_PHY_DT 3 /* Full physical mode: it=0,dt=0 */
-
-#define PAL_INIT_ENTRY 0x80000000ffffffa0
-
-#endif /* _PHY_MODE_H_ */
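
To make the RID-stealing scheme in the comment above concrete, here is a tiny sketch computing the two physical-mode RIDs from a Linux mmu_context: slots +5 and +6 of a context's eight RIDs go unused per process (regions 5~7 share RIDs), so they can be repurposed for RR0 and RR4. Function names are invented for illustration.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* RID A, loaded into RR0 for cacheable physical-mode accesses. */
    static uint64_t phys_rid_rr0(uint64_t mmu_context)
    {
        return (mmu_context << 3) + 5;
    }

    /* RID B, loaded into RR4 for uncacheable physical-mode accesses. */
    static uint64_t phys_rid_rr4(uint64_t mmu_context)
    {
        return (mmu_context << 3) + 6;
    }

    int main(void)
    {
        uint64_t ctx = 0x42;
        printf("RID A (RR0) = %#" PRIx64 ", RID B (RR4) = %#" PRIx64 "\n",
               phys_rid_rr0(ctx), phys_rid_rr4(ctx));
        return 0;
    }
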
diff --git a/xen/include/asm-ia64/vmx_platform.h b/xen/include/asm-ia64/vmx_platform.h
deleted file mode 100644
index 09f173aa4f..0000000000
--- a/xen/include/asm-ia64/vmx_platform.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * vmx_platform.h: VMX platform support
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-#ifndef __ASM_IA64_VMX_PLATFORM_H__
-#define __ASM_IA64_VMX_PLATFORM_H__
-
-#include <public/xen.h>
-#include <public/hvm/params.h>
-#include <asm/hvm/irq.h>
-#include <asm/viosapic.h>
-#include <asm/hvm/vacpi.h>
-#include <xen/hvm/iommu.h>
-
-struct vmx_ioreq_page {
- spinlock_t lock;
- struct page_info *page;
- void *va;
-};
-int vmx_set_ioreq_page(struct domain *d,
- struct vmx_ioreq_page *iorp, unsigned long gmfn);
-
-typedef struct virtual_platform_def {
- struct vmx_ioreq_page ioreq;
- struct vmx_ioreq_page buf_ioreq;
- struct vmx_ioreq_page buf_pioreq;
- unsigned long pib_base;
- unsigned long params[HVM_NR_PARAMS];
- /* One IOSAPIC now... */
- struct viosapic viosapic;
- struct vacpi vacpi;
-    /* Pass-through VT-d */
- struct hvm_irq irq;
- struct hvm_iommu hvm_iommu;
-} vir_plat_t;
-
-static inline int __fls(uint32_t word)
-{
- long double d = word;
- long exp;
-
- __asm__ __volatile__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d));
- return word ? (exp - 0xffff) : -1;
-}
-#endif
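
The __fls() above leans on getf.exp: converting a nonzero integer to floating point puts the index of its highest set bit into the exponent, which the ia64 version reads directly (biased by 0xffff). A portable sketch of the same trick via frexp(), assuming IEEE doubles:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    static int fls_via_fp(uint32_t word)
    {
        int exp;
        if (word == 0)
            return -1;
        frexp((double)word, &exp);   /* word = m * 2^exp with 0.5 <= m < 1 */
        return exp - 1;              /* index of the most significant set bit */
    }

    int main(void)
    {
        printf("fls(0x80) = %d, fls(1) = %d, fls(0) = %d\n",
               fls_via_fp(0x80), fls_via_fp(1), fls_via_fp(0));
        return 0;
    }
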
diff --git a/xen/include/asm-ia64/vmx_vcpu.h b/xen/include/asm-ia64/vmx_vcpu.h
deleted file mode 100644
index 750e6a86ff..0000000000
--- a/xen/include/asm-ia64/vmx_vcpu.h
+++ /dev/null
@@ -1,725 +0,0 @@
-/* -*- Mode:C; c-basic-offset:8; tab-width:8; indent-tabs-mode:nil -*- */
-/*
- * vmx_vcpu.h:
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- */
-
-#ifndef _XEN_IA64_VMX_VCPU_H
-#define _XEN_IA64_VMX_VCPU_H
-
-#include <xen/sched.h>
-#include <asm/ia64_int.h>
-#include <asm/vmx_vpd.h>
-#include <asm/ptrace.h>
-#include <asm/regs.h>
-#include <asm/regionreg.h>
-#include <asm/types.h>
-#include <asm/vcpu.h>
-
-#define VRN_SHIFT 61
-#define VRN0 0x0UL
-#define VRN1 0x1UL
-#define VRN2 0x2UL
-#define VRN3 0x3UL
-#define VRN4 0x4UL
-#define VRN5 0x5UL
-#define VRN6 0x6UL
-#define VRN7 0x7UL
-// for vlsapic
-#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
-
-#define VMX(x,y) ((x)->arch.arch_vmx.y)
-
-#define VMM_RR_SHIFT 20
-#define VMM_RR_MASK ((1UL<<VMM_RR_SHIFT)-1)
-
-extern u64 indirect_reg_igfld_MASK(int type, int index, u64 value);
-extern u64 cr_igfld_mask(int index, u64 value);
-extern int check_indirect_reg_rsv_fields(int type, int index, u64 value);
-extern u64 set_isr_ei_ni(VCPU * vcpu);
-extern u64 set_isr_for_na_inst(VCPU * vcpu, int op);
-extern void set_illegal_op_isr (VCPU *vcpu);
-
-/* next all for VTI domain APIs definition */
-extern void vmx_vcpu_set_psr(VCPU * vcpu, unsigned long value);
-extern IA64FAULT vmx_vcpu_cover(VCPU * vcpu);
-extern IA64FAULT vmx_vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
-extern u64 vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg);
-IA64FAULT vmx_vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vmx_vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
-extern IA64FAULT vmx_vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
-extern IA64FAULT vmx_vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte, u64 itir,
- u64 ifa);
-extern IA64FAULT vmx_vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte, u64 itir,
- u64 ifa);
-extern IA64FAULT vmx_vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 ps);
-extern IA64FAULT vmx_vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 ps);
-extern IA64FAULT vmx_vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 ps);
-extern IA64FAULT vmx_vcpu_ptc_e(VCPU * vcpu, u64 vadr);
-extern IA64FAULT vmx_vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 ps);
-extern IA64FAULT vmx_vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 ps);
-extern u64 vmx_vcpu_thash(VCPU * vcpu, u64 vadr);
-extern u64 vmx_vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
-extern u64 vmx_vcpu_ttag(VCPU * vcpu, u64 vadr);
-extern IA64FAULT vmx_vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
-extern u64 vmx_vcpu_tak(VCPU * vcpu, u64 vadr);
-extern IA64FAULT vmx_vcpu_rfi(VCPU * vcpu);
-extern u64 vmx_vcpu_get_psr(VCPU * vcpu);
-extern IA64FAULT vmx_vcpu_get_bgr(VCPU * vcpu, unsigned int reg, u64 * val);
-extern IA64FAULT vmx_vcpu_set_bgr(VCPU * vcpu, unsigned int reg, u64 val,
- int nat);
-#if 0
-extern IA64FAULT vmx_vcpu_get_gr(VCPU * vcpu, unsigned reg, u64 * val);
-extern IA64FAULT vmx_vcpu_set_gr(VCPU * vcpu, unsigned reg, u64 value, int nat);
-#endif
-extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24);
-extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU * vcpu, u64 imm24);
-extern IA64FAULT vmx_vcpu_set_psr_l(VCPU * vcpu, u64 val);
-extern void vtm_init(VCPU * vcpu);
-extern uint64_t vtm_get_itc(VCPU * vcpu);
-extern void vtm_set_itc(VCPU * vcpu, uint64_t new_itc);
-extern void vtm_set_itv(VCPU * vcpu, uint64_t val);
-extern void vtm_set_itm(VCPU * vcpu, uint64_t val);
-extern void vlsapic_reset(VCPU * vcpu);
-extern int vmx_check_pending_irq(VCPU * vcpu);
-extern void guest_write_eoi(VCPU * vcpu);
-extern int is_unmasked_irq(VCPU * vcpu);
-extern uint64_t guest_read_vivr(VCPU * vcpu);
-extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
-extern void vcpu_load_kernel_regs(VCPU * vcpu);
-extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
- void *shared_arch_info);
-extern void __vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
-extern void vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
-extern void vmx_ia64_set_dcr(VCPU * v);
-extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
-extern void vmx_asm_bsw0(void);
-extern void vmx_asm_bsw1(void);
-
-/**************************************************************************
- VCPU control register access routines
-**************************************************************************/
-
-static inline u64 vmx_vcpu_get_itm(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, itm));
-}
-
-static inline u64 vmx_vcpu_get_iva(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, iva));
-}
-
-static inline u64 vmx_vcpu_get_pta(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, pta));
-}
-
-static inline u64 vmx_vcpu_get_lid(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, lid));
-}
-
-static inline u64 vmx_vcpu_get_ivr(VCPU * vcpu)
-{
- return ((u64)guest_read_vivr(vcpu));
-}
-
-static inline u64 vmx_vcpu_get_tpr(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, tpr));
-}
-
-static inline u64 vmx_vcpu_get_eoi(VCPU * vcpu)
-{
- return (0UL); // reads of eoi always return 0
-}
-
-static inline u64 vmx_vcpu_get_irr0(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, irr[0]));
-}
-
-static inline u64 vmx_vcpu_get_irr1(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, irr[1]));
-}
-
-static inline u64 vmx_vcpu_get_irr2(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, irr[2]));
-}
-
-static inline u64 vmx_vcpu_get_irr3(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, irr[3]));
-}
-
-static inline u64 vmx_vcpu_get_itv(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, itv));
-}
-
-static inline u64 vmx_vcpu_get_pmv(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, pmv));
-}
-
-static inline u64 vmx_vcpu_get_cmcv(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, cmcv));
-}
-
-static inline u64 vmx_vcpu_get_lrr0(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, lrr0));
-}
-
-static inline u64 vmx_vcpu_get_lrr1(VCPU * vcpu)
-{
- return ((u64)VCPU(vcpu, lrr1));
-}
-
-static inline IA64FAULT vmx_vcpu_set_itm(VCPU * vcpu, u64 val)
-{
- vtm_set_itm(vcpu, val);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_set_iva(VCPU * vcpu, u64 val)
-{
- VCPU(vcpu, iva) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_set_pta(VCPU * vcpu, u64 val)
-{
- VCPU(vcpu, pta) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_set_lid(VCPU * vcpu, u64 val)
-{
- VCPU(vcpu, lid) = val;
- return IA64_NO_FAULT;
-}
-extern IA64FAULT vmx_vcpu_set_tpr(VCPU * vcpu, u64 val);
-
-static inline IA64FAULT vmx_vcpu_set_eoi(VCPU * vcpu, u64 val)
-{
- guest_write_eoi(vcpu);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_set_itv(VCPU * vcpu, u64 val)
-{
-
- vtm_set_itv(vcpu, val);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_set_pmv(VCPU * vcpu, u64 val)
-{
- VCPU(vcpu, pmv) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_set_cmcv(VCPU * vcpu, u64 val)
-{
- VCPU(vcpu, cmcv) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_set_lrr0(VCPU * vcpu, u64 val)
-{
- VCPU(vcpu, lrr0) = val;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_set_lrr1(VCPU * vcpu, u64 val)
-{
- VCPU(vcpu, lrr1) = val;
- return IA64_NO_FAULT;
-}
-
-/**************************************************************************
- VCPU privileged application register access routines
-**************************************************************************/
-static inline IA64FAULT vmx_vcpu_set_itc(VCPU * vcpu, u64 val)
-{
- vtm_set_itc(vcpu, val);
- return IA64_NO_FAULT;
-}
-
-static inline u64 vmx_vcpu_get_itc(VCPU * vcpu)
-{
- return ((u64)vtm_get_itc(vcpu));
-}
-
-/*
-static inline
-IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, u64 reg, u64 *pval)
-{
- *pval = VMX(vcpu,vrr[reg>>61]);
- return IA64_NO_FAULT;
-}
- */
-/**************************************************************************
- VCPU debug breakpoint register access routines
-**************************************************************************/
-
-static inline u64 vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg)
-{
- // TODO: unimplemented DBRs return a reserved register fault
- // TODO: Should set Logical CPU state, not just physical
- if (reg > 4) {
- panic_domain(vcpu_regs(vcpu),
- "there are only five cpuid registers");
- }
- return ((u64)VCPU(vcpu, vcpuid[reg]));
-}
-
-static inline IA64FAULT vmx_vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
-{
- return vcpu_set_dbr(vcpu, reg, val);
-}
-
-static inline IA64FAULT vmx_vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
-{
- return vcpu_set_ibr(vcpu, reg, val);
-}
-
-static inline IA64FAULT vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 *pval)
-{
- return vcpu_get_dbr(vcpu, reg, pval);
-}
-
-static inline IA64FAULT vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 *pval)
-{
- return vcpu_get_ibr(vcpu, reg, pval);
-}
-
-/**************************************************************************
- VCPU performance monitor register access routines
-**************************************************************************/
-static inline IA64FAULT vmx_vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
-{
- // TODO: Should set Logical CPU state, not just physical
- // NOTE: Writes to unimplemented PMC registers are discarded
- ia64_set_pmc(reg, val);
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
-{
- // TODO: Should set Logical CPU state, not just physical
- // NOTE: Writes to unimplemented PMD registers are discarded
- ia64_set_pmd(reg, val);
- return IA64_NO_FAULT;
-}
-
-static inline u64 vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg)
-{
- // NOTE: Reads from unimplemented PMC registers return zero
- return ((u64)ia64_get_pmc(reg));
-}
-
-static inline u64 vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg)
-{
- // NOTE: Reads from unimplemented PMD registers return zero
- return ((u64)ia64_get_pmd(reg));
-}
-
-/**************************************************************************
- VCPU banked general register access routines
-**************************************************************************/
-#if 0
-static inline IA64FAULT vmx_vcpu_bsw0(VCPU * vcpu)
-{
-
- VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
- return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_bsw1(VCPU * vcpu)
-{
-
- VCPU(vcpu, vpsr) |= IA64_PSR_BN;
- return IA64_NO_FAULT;
-}
-#endif
-#if 0
-/* Another hash performance algorithm */
-#define redistribute_rid(rid) (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
-#endif
-static inline unsigned long vrrtomrr(VCPU * v, unsigned long val)
-{
- ia64_rr rr;
-
- rr.rrval = val;
- rr.rid = rr.rid + v->arch.starting_rid;
- if (rr.ps > PAGE_SHIFT)
- rr.ps = PAGE_SHIFT;
- rr.ve = 1;
- return vmMangleRID(rr.rrval);
-/* Disable this rid allocation algorithm for now */
-#if 0
- rid = (((u64) vcpu->domain->domain_id) << DOMAIN_RID_SHIFT) + rr.rid;
- rr.rid = redistribute_rid(rid);
-#endif
-
-}
-static inline thash_cb_t *vmx_vcpu_get_vtlb(VCPU * vcpu)
-{
- return &vcpu->arch.vtlb;
-}
-
-static inline thash_cb_t *vcpu_get_vhpt(VCPU * vcpu)
-{
- return &vcpu->arch.vhpt;
-}
-
-
-/**************************************************************************
- VCPU fault injection routines
-**************************************************************************/
-
-/*
- * Set vIFA & vITIR & vIHA, when vPSR.ic =1
- * Parameter:
- * set_ifa: if true, set vIFA
- * set_itir: if true, set vITIR
- * set_iha: if true, set vIHA
- */
-static inline void
-set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
- int set_ifa, int set_itir, int set_iha)
-{
- IA64_PSR vpsr;
- u64 value;
- vpsr.val = VCPU(vcpu, vpsr);
- /* Vol2, Table 8-1 */
- if (vpsr.ic) {
- if (set_ifa){
- vcpu_set_ifa(vcpu, vadr);
- }
- if (set_itir) {
- value = vmx_vcpu_get_itir_on_fault(vcpu, vadr);
- vcpu_set_itir(vcpu, value);
- }
- if (set_iha) {
- value = vmx_vcpu_thash(vcpu, vadr);
- vcpu_set_iha(vcpu, value);
- }
- }
-}
-
-/*
- * Data TLB Fault
- * @ Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-dtlb_fault (VCPU *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR, IHA */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
- inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
-}
-
-/*
- * Instruction TLB Fault
- * @ Instruction TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-itlb_fault (VCPU *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR, IHA */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
- inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
-}
-
-/*
- * Data Nested TLB Fault
- * @ Data Nested TLB Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-nested_dtlb (VCPU *vcpu)
-{
- inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
-}
-
-/*
- * Alternate Data TLB Fault
- * @ Alternate Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-alt_dtlb (VCPU *vcpu, u64 vadr)
-{
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
-}
-
-/*
- * Alternate Instruction TLB Fault
- * @ Alternate Instruction TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-alt_itlb (VCPU *vcpu, u64 vadr)
-{
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
-}
-
-/*
- * Deal with:
- * VHPT Translation Vector
- */
-static inline void
-_vhpt_fault(VCPU *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR, IHA*/
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
- inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-}
-
-/*
- * VHPT Instruction Fault
- * @ VHPT Translation vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-ivhpt_fault (VCPU *vcpu, u64 vadr)
-{
- _vhpt_fault(vcpu, vadr);
-}
-
-/*
- * VHPT Data Fault
- * @ VHPT Translation vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-dvhpt_fault (VCPU *vcpu, u64 vadr)
-{
- _vhpt_fault(vcpu, vadr);
-}
-
-/*
- * Deal with:
- * General Exception vector
- */
-static inline void
-_general_exception (VCPU *vcpu)
-{
- inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
-}
-
-/*
- * Illegal Operation Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-illegal_op (VCPU *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Illegal Dependency Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-illegal_dep (VCPU *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Reserved Register/Field Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-rsv_reg_field (VCPU *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Privileged Operation Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-privilege_op (VCPU *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Unimplemented Data Address Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-unimpl_daddr (VCPU *vcpu)
-{
- ISR isr;
-
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_UNIMPL_DADDR_FAULT;
- vcpu_set_isr(vcpu, isr.val);
- _general_exception(vcpu);
-}
-
-/*
- * Privileged Register Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-privilege_reg (VCPU *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Deal with
- * Nat consumption vector
- * Parameter:
- * vadr: only used when t == DATA or INSTRUCTION
- */
-static inline void
-_nat_consumption_fault(VCPU *vcpu, u64 vadr, miss_type t)
-{
- /* If vPSR.ic && t == DATA/INST, IFA */
- if ( t == DATA || t == INSTRUCTION ) {
- /* IFA */
- set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
- }
-
- inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
-}
-
-/*
- * IR Data Nat Page Consumption Fault
- * @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-#if 0
-static inline void
-ir_nat_page_consumption (VCPU *vcpu, u64 vadr)
-{
- _nat_consumption_fault(vcpu, vadr, DATA);
-}
-#endif // kept disabled since it is currently unused
-
-/*
- * Instruction Nat Page Consumption Fault
- * @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-inat_page_consumption (VCPU *vcpu, u64 vadr)
-{
- _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
-}
-
-/*
- * Register Nat Consumption Fault
- * @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-rnat_consumption (VCPU *vcpu)
-{
- _nat_consumption_fault(vcpu, 0, REGISTER);
-}
-
-/*
- * Data Nat Page Consumption Fault
- * @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-dnat_page_consumption (VCPU *vcpu, uint64_t vadr)
-{
- _nat_consumption_fault(vcpu, vadr, DATA);
-}
-
-/*
- * Deal with
- * Page not present vector
- */
-static inline void
-__page_not_present(VCPU *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
-}
-
-static inline void
-data_page_not_present(VCPU *vcpu, u64 vadr)
-{
- __page_not_present(vcpu, vadr);
-}
-
-static inline void
-inst_page_not_present(VCPU *vcpu, u64 vadr)
-{
- __page_not_present(vcpu, vadr);
-}
-
-/*
- * Deal with
- * Data access rights vector
- */
-static inline void
-data_access_rights(VCPU *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
-}
-
-/*
- * Unimplemented Instruction Address Trap
- * @ Lower-Privilege Transfer Trap Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-static inline void
-unimpl_iaddr_trap (VCPU *vcpu, u64 vadr)
-{
- ISR isr;
-
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_UNIMPL_IADDR_TRAP;
- vcpu_set_isr(vcpu, isr.val);
- vcpu_set_ifa(vcpu, vadr);
- inject_guest_interruption(vcpu, IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR);
-}
-#endif
diff --git a/xen/include/asm-ia64/vmx_vcpu_save.h b/xen/include/asm-ia64/vmx_vcpu_save.h
deleted file mode 100644
index ae5d1ec560..0000000000
--- a/xen/include/asm-ia64/vmx_vcpu_save.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/******************************************************************************
- * vmx_vcpu_save.h
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#ifndef __ASM_IA64_VMX_VCPU_SAVE_H__
-#define __ASM_IA64_VMX_VCPU_SAVE_H__
-
-#include <xen/sched.h>
-#include <xen/domain.h>
-
-void vmx_arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c);
-int vmx_arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c);
-
-#endif /* __ASM_IA64_VMX_VCPU_SAVE_H__ */
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-ia64/vmx_vpd.h b/xen/include/asm-ia64/vmx_vpd.h
deleted file mode 100644
index 324cae598d..0000000000
--- a/xen/include/asm-ia64/vmx_vpd.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_vpd.h: virtual processor descriptor (VPD) related definitions
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
- */
-
-#ifndef _ASM_IA64_VMX_VPD_H_
-#define _ASM_IA64_VMX_VPD_H_
-
-#ifdef VTI_DEBUG
-/*
- * Must be a power of 2.
- * Be careful to avoid stack overflow by keeping
- * struct arch_vmx_struct (i.e. struct vcpu) small enough.
- * sizeof(struct ivt_debug) * IVT_DEBUG_MAX = 32 * IVT_DEBUG_MAX
- */
-//#define IVT_DEBUG_MAX 128 /* 4096 bytes */
-#define IVT_DEBUG_MAX 16 /* 512 bytes */
-#endif
-
-#ifndef __ASSEMBLY__
-
-#include <asm/vtm.h>
-#include <asm/vmx_platform.h>
-#include <public/xen.h>
-#include <xen/spinlock.h>
-
-struct sioemu_callback_info;
-
-#define VPD_SHIFT 16
-#define VPD_SIZE (1 << VPD_SHIFT)
-
-#ifdef VTI_DEBUG
-struct ivt_debug{
- unsigned long iip;
- unsigned long ipsr;
- unsigned long ifa;
- unsigned long vector;
-};
-#endif
-
-struct arch_vmx_struct {
-// vpd_t *vpd;
- vtime_t vtm;
- unsigned long vrr[8];
- /* if the corresponding bit is 1, then this page size is
- used in this region */
- unsigned long psbits[8];
- unsigned long vkr[8];
- unsigned long cr_iipa; /* for emulation */
- unsigned long cr_isr; /* for emulation */
- unsigned long cause;
- unsigned long opcode;
- unsigned long mpta;
- unsigned long xen_port;
- unsigned char flags;
- unsigned char xtp;
- unsigned char pal_init_pending;
- unsigned char mmu_mode; /* Current mmu mode. See vmx_phy_mode.h */
-#ifdef VTI_DEBUG
- unsigned long ivt_current;
- struct ivt_debug ivt_debug[IVT_DEBUG_MAX];
-#endif
- /* sioemu info buffer. */
- unsigned long sioemu_info_gpa;
- struct sioemu_callback_info *sioemu_info_mva;
-};
-
-#define VMX_DOMAIN(v) v->arch.arch_vmx.flags
-
-#define ARCH_VMX_DOMAIN 0 /* Need it to indicate VTi domain */
-
-/* pin/unpin vpd area for PAL call with DTR[] */
-void __vmx_vpd_pin(struct vcpu* v);
-void __vmx_vpd_unpin(struct vcpu* v);
-
-static inline void vmx_vpd_pin(struct vcpu* v)
-{
- if (likely(v == current))
- return;
- __vmx_vpd_pin(v);
-}
-
-static inline void vmx_vpd_unpin(struct vcpu* v)
-{
- if (likely(v == current))
- return;
- __vmx_vpd_unpin(v);
-}
-
-#endif //__ASSEMBLY__
-
-// VPD field offset
-#define VPD_VAC_START_OFFSET 0
-#define VPD_VDC_START_OFFSET 8
-#define VPD_VHPI_START_OFFSET 256
-#define VPD_VGR_START_OFFSET 1024
-#define VPD_VBGR_START_OFFSET 1152
-#define VPD_VNAT_START_OFFSET 1280
-#define VPD_VBNAT_START_OFFSET 1288
-#define VPD_VCPUID_START_OFFSET 1296
-#define VPD_VPSR_START_OFFSET 1424
-#define VPD_VPR_START_OFFSET 1432
-#define VPD_VRSE_CFLE_START_OFFSET 1440
-#define VPD_VCR_START_OFFSET 2048
-#define VPD_VTPR_START_OFFSET 2576
-#define VPD_VRR_START_OFFSET 3072
-#define VPD_VMM_VAIL_START_OFFSET 31744
-
-
-#endif /* _ASM_IA64_VMX_VPD_H_ */
diff --git a/xen/include/asm-ia64/vtm.h b/xen/include/asm-ia64/vtm.h
deleted file mode 100644
index dde6764497..0000000000
--- a/xen/include/asm-ia64/vtm.h
+++ /dev/null
@@ -1,67 +0,0 @@
-
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vtm.h: virtual timer head file.
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- */
-
-#ifndef _VTM_H_
-#define _VTM_H_
-
-#include <xen/timer.h>
-#include <xen/types.h>
-
-#define MAX_JUMP_STEP (5000) /* 500ms, max jump step */
-#define MIN_GUEST_RUNNING_TIME (0) /* 10ms for guest os to run */
-#define ITV_VECTOR_MASK (0xff)
-
-typedef struct vtime {
- long vtm_offset; // guest ITC = host ITC + vtm_offset
- uint64_t vtm_local_drift;
- uint64_t last_itc;
- uint64_t pending;
-    /*
-     * Local drift (temporary) after guest suspension.
-     * If the ITC has jumped a long way across a suspension,
-     * guest ITC = host ITC + vtm_offset - vtm_local_drift,
-     * so that the elapsed time seen through the guest ITC is limited
-     * to cfg_max_jump, which keeps all kinds of device drivers happy.
-     */
-
- // next all uses ITC tick as unit
-    uint64_t cfg_max_jump;      // max jump within one suspension
- uint64_t cfg_min_grun; // min guest running time since last jump
-// uint64_t latest_read_itc; // latest guest read ITC
- struct timer vtm_timer;
-// int triggered;
-
-
- uint64_t guest_running_time; // guest running time since last switch
- //uint64_t vtm_last_suspending_time;
- //uint64_t switch_in_time;
- //uint64_t switch_out_time;
- //uint64_t itc_freq;
-
-} vtime_t;
-
-#define ITV_VECTOR(itv) (itv&0xff)
-#define ITV_IRQ_MASK(itv) (itv&(1<<16))
-
-#define VTM_FIRED(vtm) ((vtm)->triggered)
-
-#endif /* _VTM_H_ */
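
As a rough illustration of how vtm_offset, vtm_local_drift and cfg_max_jump are meant to interact per the comment above, here is a toy model; the exact clamping policy shown is an assumption for illustration, not the actual Xen logic.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_vtime {
        int64_t  vtm_offset;       /* guest ITC = host ITC + vtm_offset */
        uint64_t vtm_local_drift;  /* absorbs long jumps across suspension */
        uint64_t last_itc;         /* last guest ITC handed out */
        uint64_t cfg_max_jump;     /* max forward jump the guest may observe */
    };

    static uint64_t toy_guest_itc(struct toy_vtime *t, uint64_t host_itc)
    {
        uint64_t guest = host_itc + (uint64_t)t->vtm_offset - t->vtm_local_drift;

        if (guest - t->last_itc > t->cfg_max_jump) {
            /* Fold the excess into the drift so the guest sees a bounded jump. */
            t->vtm_local_drift += guest - t->last_itc - t->cfg_max_jump;
            guest = t->last_itc + t->cfg_max_jump;
        }
        t->last_itc = guest;
        return guest;
    }

    int main(void)
    {
        struct toy_vtime t = { .vtm_offset = 1000, .vtm_local_drift = 0,
                               .last_itc = 0, .cfg_max_jump = 5000 };
        printf("%" PRIu64 "\n", toy_guest_itc(&t, 100));      /* small step */
        printf("%" PRIu64 "\n", toy_guest_itc(&t, 1000000));  /* long jump, clamped */
        return 0;
    }
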
diff --git a/xen/include/asm-ia64/xengcc_intrin.h b/xen/include/asm-ia64/xengcc_intrin.h
deleted file mode 100644
index 818fae7da7..0000000000
--- a/xen/include/asm-ia64/xengcc_intrin.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef _ASM_IA64_XENGCC_INTRIN_H
-#define _ASM_IA64_XENGCC_INTRIN_H
-/*
- * flushrs: flush the register stack engine (RSE) contents to the backing store.
- */
-#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
-
-#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
-
-#define ia64_get_rsc() \
-({ \
- unsigned long val; \
- asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory"); \
- val; \
-})
-
-#define ia64_set_rsc(val) \
- asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
-
-#define ia64_get_bspstore() \
-({ \
- unsigned long val; \
- asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory"); \
- val; \
-})
-
-#define ia64_set_bspstore(val) \
- asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
-
-#define ia64_get_rnat() \
-({ \
- unsigned long val; \
- asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory"); \
- val; \
-})
-
-#define ia64_set_rnat(val) \
- asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
-
-#define ia64_ttag(addr) \
-({ \
- __u64 ia64_intri_res; \
- asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
- ia64_intri_res; \
-})
-
-#define ia64_get_dcr() \
-({ \
- __u64 result; \
- asm volatile ("mov %0=cr.dcr" : "=r"(result) : ); \
- result; \
-})
-
-#define ia64_set_dcr(val) \
-({ \
- asm volatile ("mov cr.dcr=%0" :: "r"(val) ); \
-})
-
-#endif /* _ASM_IA64_XENGCC_INTRIN_H */
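
For context on how these intrinsics were meant to compose: a sketch (IA64-only, will not compile elsewhere) of the usual register-stack-engine snapshot pattern. Per the IA64 architecture, ar.rsc must be put into enforced-lazy mode (mode bits 1:0 cleared) before ar.bspstore is written or ar.rnat is read; the function name rse_snapshot is hypothetical.

static inline void rse_snapshot(unsigned long *bspstore, unsigned long *rnat)
{
    unsigned long rsc = ia64_get_rsc();

    ia64_flushrs();              /* spill dirty stacked registers to memory */
    ia64_set_rsc(rsc & ~0x3UL);  /* switch to enforced-lazy mode */
    *bspstore = ia64_get_bspstore();
    *rnat = ia64_get_rnat();     /* NaT collection bits for the spilled regs */
    ia64_set_rsc(rsc);           /* restore the caller's RSE mode */
}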
diff --git a/xen/include/asm-ia64/xenia64regs.h b/xen/include/asm-ia64/xenia64regs.h
deleted file mode 100644
index 099fc7250e..0000000000
--- a/xen/include/asm-ia64/xenia64regs.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _ASM_IA64_XENIA64REGS_H
-#define _ASM_IA64_XENIA64REGS_H
-
-#define IA64_REG_CR_DCR 0
-#define IA64_REG_CR_ITM 1
-#define IA64_REG_CR_IVA 2
-#define IA64_REG_CR_PTA 8
-#define IA64_REG_CR_IPSR 16
-#define IA64_REG_CR_ISR 17
-#define IA64_REG_CR_IIP 19
-#define IA64_REG_CR_IFA 20
-#define IA64_REG_CR_ITIR 21
-#define IA64_REG_CR_IIPA 22
-#define IA64_REG_CR_IFS 23
-#define IA64_REG_CR_IIM 24
-#define IA64_REG_CR_IHA 25
-#define IA64_REG_CR_LID 64
-#define IA64_REG_CR_IVR 65
-#define IA64_REG_CR_TPR 66
-#define IA64_REG_CR_EOI 67
-#define IA64_REG_CR_IRR0 68
-#define IA64_REG_CR_IRR1 69
-#define IA64_REG_CR_IRR2 70
-#define IA64_REG_CR_IRR3 71
-#define IA64_REG_CR_ITV 72
-#define IA64_REG_CR_PMV 73
-#define IA64_REG_CR_CMCV 74
-#define IA64_REG_CR_LRR0 80
-#define IA64_REG_CR_LRR1 81
-
-#endif /* _ASM_IA64_XENIA64REGS_H */
diff --git a/xen/include/asm-ia64/xenkregs.h b/xen/include/asm-ia64/xenkregs.h
deleted file mode 100644
index 8a36ca9904..0000000000
--- a/xen/include/asm-ia64/xenkregs.h
+++ /dev/null
@@ -1,98 +0,0 @@
-#ifndef _ASM_IA64_XENKREGS_H
-#define _ASM_IA64_XENKREGS_H
-
-/*
- * Translation registers:
- */
-#define IA64_TR_MAPPED_REGS 3 /* dtr3: vcpu mapped regs */
-#define IA64_TR_SHARED_INFO 4 /* dtr4: page shared with domain */
-#define IA64_TR_VHPT 5 /* dtr5: vhpt */
-
-#define IA64_TR_VPD 2 /* itr2: vpd */
-
-#define IA64_DTR_GUEST_KERNEL 7
-#define IA64_ITR_GUEST_KERNEL 2
-/* Processor status register bits: */
-#define IA64_PSR_VM_BIT 46
-#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
-
-#define IA64_DEFAULT_DCR_BITS (IA64_DCR_PP | IA64_DCR_LC | IA64_DCR_DM | \
- IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | \
- IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD)
-
-// note: IA64_PSR_PK is removed from the following; why is this necessary?
-#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
- IA64_PSR_DT | IA64_PSR_RT | \
- IA64_PSR_IT | IA64_PSR_BN)
-
-#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL| IA64_PSR_DFH| \
- IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
- IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
- IA64_PSR_CPL| IA64_PSR_MC | IA64_PSR_IS | \
- IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
- IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
-
-// NO PSR_CLR IS DIFFERENT! (CPL)
-#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
-#define IA64_PSR_CPL0 (__IA64_UL(1) << IA64_PSR_CPL0_BIT)
-
-/* Interruption Function State */
-#define IA64_IFS_V_BIT 63
-#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT)
-
-/* Interruption Status Register. */
-#define IA64_ISR_NI_BIT 39 /* Nested interrupt. */
-
-/* Page Table Address */
-#define IA64_PTA_VE_BIT 0
-#define IA64_PTA_SIZE_BIT 2
-#define IA64_PTA_SIZE_LEN 6
-#define IA64_PTA_VF_BIT 8
-#define IA64_PTA_BASE_BIT 15
-
-#define IA64_PTA_VE (__IA64_UL(1) << IA64_PTA_VE_BIT)
-#define IA64_PTA_SIZE (__IA64_UL((1 << IA64_PTA_SIZE_LEN) - 1) << \
- IA64_PTA_SIZE_BIT)
-#define IA64_PTA_VF (__IA64_UL(1) << IA64_PTA_VF_BIT)
-#define IA64_PTA_BASE (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
-
-/* Some cr.itir declarations. */
-#define IA64_ITIR_PS 2
-#define IA64_ITIR_PS_LEN 6
-#define IA64_ITIR_PS_MASK (((__IA64_UL(1) << IA64_ITIR_PS_LEN) - 1) \
- << IA64_ITIR_PS)
-#define IA64_ITIR_KEY 8
-#define IA64_ITIR_KEY_LEN 24
-#define IA64_ITIR_KEY_MASK (((__IA64_UL(1) << IA64_ITIR_KEY_LEN) - 1) \
- << IA64_ITIR_KEY)
-#define IA64_ITIR_PS_KEY(_ps, _key) (((_ps) << IA64_ITIR_PS) | \
- (((_key) << IA64_ITIR_KEY)))
-
-/* Region Register Bits */
-#define IA64_RR_PS 2
-#define IA64_RR_PS_LEN 6
-#define IA64_RR_RID 8
-#define IA64_RR_RID_LEN 24
-#define IA64_RR_RID_MASK (((__IA64_UL(1) << IA64_RR_RID_LEN) - 1) << \
- IA64_RR_RID)
-
-/* Define Protection Key Register (PKR) */
-#define IA64_PKR_V 0
-#define IA64_PKR_WD 1
-#define IA64_PKR_RD 2
-#define IA64_PKR_XD 3
-#define IA64_PKR_MBZ0 4
-#define IA64_PKR_KEY 8
-#define IA64_PKR_KEY_LEN 24
-#define IA64_PKR_MBZ1 32
-
-#define IA64_PKR_VALID (1 << IA64_PKR_V)
-#define IA64_PKR_KEY_MASK (((__IA64_UL(1) << IA64_PKR_KEY_LEN) - 1) \
- << IA64_PKR_KEY)
-
-#define XEN_IA64_NPKRS 15 /* Number of PKRs available in PV */
-
- /* A pkr val for the hypervisor: key = 0, valid = 1. */
-#define XEN_IA64_PKR_VAL ((0 << IA64_PKR_KEY) | IA64_PKR_VALID)
-
-#endif /* _ASM_IA64_XENKREGS_H */
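
The ITIR pack/unpack macros above compose as follows — a minimal sketch assuming the header's definitions are in scope; the function name and values are illustrative:

static void itir_example(void)
{
    unsigned long itir = IA64_ITIR_PS_KEY(14UL, 3UL); /* 16KB page, key 3 */
    unsigned long ps   = (itir & IA64_ITIR_PS_MASK)  >> IA64_ITIR_PS;  /* 14 */
    unsigned long key  = (itir & IA64_ITIR_KEY_MASK) >> IA64_ITIR_KEY; /* 3 */

    (void)ps;
    (void)key;
}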
diff --git a/xen/include/asm-ia64/xenmca.h b/xen/include/asm-ia64/xenmca.h
deleted file mode 100644
index 653253e8fd..0000000000
--- a/xen/include/asm-ia64/xenmca.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * File: xenmca.h
- * Purpose: Machine check handling specific defines for Xen
- *
- * Copyright (C) 2006 FUJITSU LTD. (kaz@jp.fujitsu.com)
- */
-
-#ifndef _ASM_IA64_XENMCA_H
-#define _ASM_IA64_XENMCA_H
-
-#ifndef __ASSEMBLER__
-#include <linux/list.h>
-#include <asm/sal.h>
-
-typedef struct sal_queue_entry_t {
- int cpuid;
- int sal_info_type;
- unsigned int vector;
- unsigned int virq;
- unsigned int length;
- struct list_head list;
-} sal_queue_entry_t;
-
-extern struct list_head *sal_queue;
-
-struct ia64_mca_tlb_info {
- u64 cr_lid;
- u64 percpu_paddr;
-};
-
-extern struct ia64_mca_tlb_info ia64_mca_tlb_list[];
-#endif /* __ASSEMBLER__ */
-
-#endif /* _ASM_IA64_XENMCA_H */
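
A minimal sketch of how a consumer might drain one SAL record queue, assuming Xen's <linux/list.h> helpers (list_for_each_entry_safe, list_del) and that sal_queue points at per-type list heads as the deleted MCA code suggests; the function name is hypothetical:

static void drain_sal_queue(struct list_head *q)
{
    sal_queue_entry_t *e, *tmp;

    list_for_each_entry_safe(e, tmp, q, list) {
        /* hand e->sal_info_type / e->vector / e->virq to the log path */
        list_del(&e->list);
    }
}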
diff --git a/xen/include/asm-ia64/xenoprof.h b/xen/include/asm-ia64/xenoprof.h
deleted file mode 100644
index daf2ffa9e3..0000000000
--- a/xen/include/asm-ia64/xenoprof.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/******************************************************************************
- * asm-ia64/xenoprof.h
- * xenoprof ia64 arch specific header file
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- * VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_XENOPROF_H__
-#define __ASM_XENOPROF_H__
-
-#include <xen/grant_table.h>
-
-int xenoprof_arch_init(int *num_events, char *cpu_type);
-int xenoprof_arch_reserve_counters(void);
-int xenoprof_arch_counter(XEN_GUEST_HANDLE(void) arg);
-int xenoprof_arch_setup_events(void);
-int xenoprof_arch_enable_virq(void);
-int xenoprof_arch_start(void);
-void xenoprof_arch_stop(void);
-void xenoprof_arch_disable_virq(void);
-void xenoprof_arch_release_counters(void);
-
-static inline int xenoprof_arch_ibs_counter(XEN_GUEST_HANDLE(void) arg)
-{
- return -ENOSYS; /* not supported */
-}
-/* AMD IBS not supported */
-#define ibs_caps 0
-
-struct vcpu;
-struct cpu_user_regs;
-int xenoprofile_get_mode(const struct vcpu *, const struct cpu_user_regs *);
-static inline int xenoprof_backtrace_supported(void)
-{
- return 0;
-}
-static inline void xenoprof_backtrace(struct vcpu *vcpu,
- const struct pt_regs *regs, unsigned long depth, int mode)
-{
- /* To be implemented */
- return;
-}
-#define xenoprof_shared_gmfn(d, gmaddr, maddr) \
-do { \
- unsigned long ret; \
- ret = create_grant_host_mapping((gmaddr), \
- (maddr) >> PAGE_SHIFT, 0, 0); \
- BUG_ON(ret != GNTST_okay); \
-} while (0)
-
-static inline int
-ring(const struct pt_regs* regs)
-{
- return ((struct ia64_psr*)(&(regs)->cr_ipsr))->cpl;
-}
-#define ring_0(r) (ring(r) == 0)
-#define ring_1(r) (ring(r) == 1)
-#define ring_2(r) (ring(r) == 2)
-#define ring_3(r) (ring(r) == 3)
-
-#endif /* __ASM_XENOPROF_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
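
The ring() helper above extracts the privilege level from cr.ipsr; a hedged sketch of how the ring_*() predicates might classify a sample (the mode values here are illustrative, not the actual xenoprof ABI):

static inline int sample_mode_sketch(const struct pt_regs *regs)
{
    if (ring_0(regs))
        return 2;   /* hypervisor context */
    if (ring_3(regs))
        return 0;   /* user context */
    return 1;       /* guest kernel (ring 1 or 2) */
}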
diff --git a/xen/include/asm-ia64/xenpage.h b/xen/include/asm-ia64/xenpage.h
deleted file mode 100644
index 4acbe91e13..0000000000
--- a/xen/include/asm-ia64/xenpage.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef _ASM_IA64_XENPAGE_H
-#define _ASM_IA64_XENPAGE_H
-
-/* moved from xen/include/asm-ia64/linux-xen/asm/pgtable.h to compile */
-#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */
-
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_VIRTUAL_FRAME_TABLE
-extern int ia64_mfn_valid (unsigned long pfn);
-# define mfn_valid(_pfn) (((_pfn) < max_page) && ia64_mfn_valid(_pfn))
-#else
-# define mfn_valid(_pfn) ((_pfn) < max_page)
-#endif
-# define page_to_mfn(_page) ((unsigned long) ((_page) - frame_table))
-# define mfn_to_page(_pfn) (frame_table + (_pfn))
-
-
-#include <asm/xensystem.h>
-
-/*
- * macro: avoid header inclusion hell
- * static inline unsigned long __virt_to_maddr(unsigned long va)
- */
-/*
- * Because the top 8 bits of the VA are used by Xen,
- * and Xen uses cached/uncached identity mappings,
- * IA64_MAX_PHYS_BITS can't be larger than 56.
- */
-#define __virt_to_maddr(va) \
- ({ \
- unsigned long __va__ = (va); \
- (__va__ - KERNEL_START < KERNEL_TR_PAGE_SIZE) ? \
- xen_pstart + (__va__ - KERNEL_START) : \
- (__va__ & ((1UL << IA64_MAX_PHYS_BITS) - 1)); \
- })
-
-#define virt_to_maddr(va) (__virt_to_maddr((unsigned long)va))
-
-
-#define page_to_maddr(page) (page_to_mfn(page) << PAGE_SHIFT)
-#define virt_to_page(kaddr) (mfn_to_page(virt_to_maddr(kaddr) >> PAGE_SHIFT))
-
-#define page_to_virt(_page) maddr_to_virt(page_to_maddr(_page))
-#define maddr_to_page(kaddr) mfn_to_page(((kaddr) >> PAGE_SHIFT))
-
-/* Convert between Xen-heap virtual addresses and machine frame numbers. */
-#define virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
-#define mfn_to_virt(mfn) maddr_to_virt((mfn) << PAGE_SHIFT)
-
-/* Convert between frame number and address formats. */
-#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
-#define paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
-
-typedef union xen_va {
- struct {
- unsigned long off : 60;
- unsigned long reg : 4;
- } f;
- unsigned long l;
- void *p;
-} xen_va;
-
-static inline int get_order_from_shift(unsigned long shift)
-{
- if (shift <= PAGE_SHIFT)
- return 0;
- else
- return shift - PAGE_SHIFT;
-}
-/* from identity va to xen va */
-#define virt_to_xenva(va) ((unsigned long)va - PAGE_OFFSET - \
- xen_pstart + KERNEL_START)
-
-/* Clear bit 63 (UC bit in physical addresses). */
-static inline u64 pa_clear_uc(u64 paddr)
-{
- return (paddr << 1) >> 1;
-}
-
-#define __pa(x) (virt_to_maddr(x))
-#define __va(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
-
-/* It is sometimes very useful to have unsigned long as result. */
-#define __va_ul(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.l;})
-
-#define __va_efi(x) ((void*)((unsigned long)(x) | __IA64_EFI_CACHED_OFFSET))
-
-#endif
-#endif /* _ASM_IA64_XENPAGE_H */
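
The xen_va union trick used by __va() above is worth spelling out: an identity-mapped virtual address is just the machine address with the top region bits (f.reg) forced to all ones, i.e. region 0xf. A standalone sketch with hypothetical names:

#include <stdint.h>

typedef union {
    struct { unsigned long off : 60, reg : 4; } f;
    unsigned long l;
    void *p;
} xen_va_sketch;

static void *maddr_to_virt_sketch(unsigned long maddr)
{
    xen_va_sketch v;

    v.l = maddr;
    v.f.reg = -1;   /* 0xf...: Xen's cached identity-mapped region */
    return v.p;
}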
diff --git a/xen/include/asm-ia64/xenprocessor.h b/xen/include/asm-ia64/xenprocessor.h
deleted file mode 100644
index 4f3cce4c10..0000000000
--- a/xen/include/asm-ia64/xenprocessor.h
+++ /dev/null
@@ -1,253 +0,0 @@
-#ifndef _ASM_IA64_XENPROCESSOR_H
-#define _ASM_IA64_XENPROCESSOR_H
-/*
- * xen specific processor definition
- *
- * Copyright (C) 2005 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- * Copyright (C) 2005 Intel Co.
- * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
- *
- */
-
-
-#define ia64_is_local_fpu_owner(t) 0
-
-/* like above but expressed as bitfields for more efficient access: */
-struct ia64_psr {
- __u64 reserved0 : 1;
- __u64 be : 1;
- __u64 up : 1;
- __u64 ac : 1;
- __u64 mfl : 1;
- __u64 mfh : 1;
- __u64 reserved1 : 7;
- __u64 ic : 1;
- __u64 i : 1;
- __u64 pk : 1;
- __u64 reserved2 : 1;
- __u64 dt : 1;
- __u64 dfl : 1;
- __u64 dfh : 1;
- __u64 sp : 1;
- __u64 pp : 1;
- __u64 di : 1;
- __u64 si : 1;
- __u64 db : 1;
- __u64 lp : 1;
- __u64 tb : 1;
- __u64 rt : 1;
- __u64 reserved3 : 4;
- __u64 cpl : 2;
- __u64 is : 1;
- __u64 mc : 1;
- __u64 it : 1;
- __u64 id : 1;
- __u64 da : 1;
- __u64 dd : 1;
- __u64 ss : 1;
- __u64 ri : 2;
- __u64 ed : 1;
- __u64 bn : 1;
- __u64 ia : 1;
- __u64 vm : 1;
- __u64 reserved5 : 17;
-};
-
-/* vmx like above but expressed as bitfields for more efficient access: */
-typedef union{
- __u64 val;
- struct{
- __u64 reserved0 : 1;
- __u64 be : 1;
- __u64 up : 1;
- __u64 ac : 1;
- __u64 mfl : 1;
- __u64 mfh : 1;
- __u64 reserved1 : 7;
- __u64 ic : 1;
- __u64 i : 1;
- __u64 pk : 1;
- __u64 reserved2 : 1;
- __u64 dt : 1;
- __u64 dfl : 1;
- __u64 dfh : 1;
- __u64 sp : 1;
- __u64 pp : 1;
- __u64 di : 1;
- __u64 si : 1;
- __u64 db : 1;
- __u64 lp : 1;
- __u64 tb : 1;
- __u64 rt : 1;
- __u64 reserved3 : 4;
- __u64 cpl : 2;
- __u64 is : 1;
- __u64 mc : 1;
- __u64 it : 1;
- __u64 id : 1;
- __u64 da : 1;
- __u64 dd : 1;
- __u64 ss : 1;
- __u64 ri : 2;
- __u64 ed : 1;
- __u64 bn : 1;
- __u64 reserved4 : 19;
- };
-} IA64_PSR;
-
-typedef union {
- __u64 val;
- struct {
- __u64 code : 16;
- __u64 vector : 8;
- __u64 reserved1 : 8;
- __u64 x : 1;
- __u64 w : 1;
- __u64 r : 1;
- __u64 na : 1;
- __u64 sp : 1;
- __u64 rs : 1;
- __u64 ir : 1;
- __u64 ni : 1;
- __u64 so : 1;
- __u64 ei : 2;
- __u64 ed : 1;
- __u64 reserved2 : 20;
- };
-} ISR;
-
-
-typedef union {
- __u64 val;
- struct {
- __u64 ve : 1;
- __u64 reserved0 : 1;
- __u64 size : 6;
- __u64 vf : 1;
- __u64 reserved1 : 6;
- __u64 base : 49;
- };
-} PTA;
-
-typedef union {
- __u64 val;
- struct {
- __u64 rv : 16;
- __u64 eid : 8;
- __u64 id : 8;
- __u64 ig : 32;
- };
-} LID;
-
-typedef union{
- __u64 val;
- struct {
- __u64 rv : 3;
- __u64 ir : 1;
- __u64 eid : 8;
- __u64 id : 8;
- __u64 ib_base : 44;
- };
-} ipi_a_t;
-
-typedef union{
- __u64 val;
- struct {
- __u64 vector : 8;
- __u64 dm : 3;
- __u64 ig : 53;
- };
-} ipi_d_t;
-
-typedef union {
- __u64 val;
- struct {
- __u64 ig0 : 4;
- __u64 mic : 4;
- __u64 rsv : 8;
- __u64 mmi : 1;
- __u64 ig1 : 47;
- };
-} tpr_t;
-
-/* indirect register type */
-enum {
- IA64_CPUID, /* cpuid */
- IA64_DBR, /* dbr */
- IA64_IBR, /* ibr */
- IA64_PKR, /* pkr */
- IA64_PMC, /* pmc */
- IA64_PMD, /* pmd */
- IA64_RR /* rr */
-};
-
-/* instruction type */
-enum {
- IA64_INST_TPA=1,
- IA64_INST_TAK
-};
-
-/* Generate Mask
- * Parameter:
- * bit -- starting bit
- * len -- how many bits
- */
-#define MASK(bit,len) \
-({ \
- __u64 ret; \
- \
- __asm __volatile("dep %0=-1, r0, %1, %2" \
- : "=r" (ret): \
- "M" (bit), \
- "M" (len) ); \
- ret; \
-})
-
-typedef union {
- struct {
- __u64 kr0;
- __u64 kr1;
- __u64 kr2;
- __u64 kr3;
- __u64 kr4;
- __u64 kr5;
- __u64 kr6;
- __u64 kr7;
- };
- __u64 _kr[8];
-} cpu_kr_ia64_t;
-
-DECLARE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
-
-typedef union {
- struct {
- u64 rv3 : 2; // 0-1
- u64 ps : 6; // 2-7
- u64 key : 24; // 8-31
- u64 rv4 : 32; // 32-63
- };
- struct {
- u64 __rv3 : 32; // 0-31
- // next extension to rv4
- u64 rid : 24; // 32-55
- u64 __rv4 : 8; // 56-63
- };
- u64 itir;
-} ia64_itir_t;
-
-typedef union {
- u64 val;
- struct {
- u64 v : 1;
- u64 wd : 1;
- u64 rd : 1;
- u64 xd : 1;
- u64 reserved1 : 4;
- u64 key : 24;
- u64 reserved2 : 32;
- };
-} ia64_pkr_t;
-
-#endif // _ASM_IA64_XENPROCESSOR_H
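
The MASK() macro above leans on the IA64 dep instruction; for readers without the ISA reference, a portable C sketch of the same computation (the name mask_sketch is hypothetical):

#include <stdint.h>

/* "dep %0=-1, r0, bit, len" deposits len one-bits at position bit into
 * a zero word, i.e. ((1 << len) - 1) << bit. */
static inline uint64_t mask_sketch(unsigned int bit, unsigned int len)
{
    uint64_t field = (len < 64) ? ((1ULL << len) - 1) : ~0ULL;

    return field << bit;
}
/* e.g. mask_sketch(8, 24) yields the 24-bit RID/key field mask used above */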
diff --git a/xen/include/asm-ia64/xensystem.h b/xen/include/asm-ia64/xensystem.h
deleted file mode 100644
index 1fa0456bd5..0000000000
--- a/xen/include/asm-ia64/xensystem.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef _ASM_IA64_XENSYSTEM_H
-#define _ASM_IA64_XENSYSTEM_H
-/*
- * xen specific context definition
- *
- * Copyright (C) 2005 Hewlett-Packard Co.
- * Dan Magenheimer (dan.magenheimer@hp.com)
- *
- * Copyright (C) 2005 Intel Co.
- * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
- *
- */
-
-/* Define HV space hierarchy.
- VMM memory space is protected by CPL for paravirtualized domains and
- by VA for VTi domains. VTi imposes VA bit 60 != VA bit 59 for VMM. */
-
-#define HYPERVISOR_VIRT_START 0xf000000000000000
-#define __IA64_UNCACHED_OFFSET 0xf200000000000000UL
-#define DEFAULT_SHAREDINFO_ADDR 0xf500000000000000
-#define PERCPU_ADDR (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
-#ifdef CONFIG_VIRTUAL_FRAME_TABLE
-#define VIRT_FRAME_TABLE_ADDR 0xf600000000000000
-#define VIRT_FRAME_TABLE_END 0xf700000000000000
-#endif
-#define HYPERVISOR_VIRT_END 0xf800000000000000
-
-#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000)
-
-#define XEN_VIRT_UC_BIT 57
-
-#define KERNEL_START 0xf400000004000000
-#define GATE_ADDR KERNEL_START
-
-/* For kexec between Xen and Linux to work, EFI needs to be mapped
- * at the same place by both. It seems most convenient to make Xen
- * do the dirty work here. */
-#define __IA64_EFI_UNCACHED_OFFSET 0xc000000000000000UL
-#define __IA64_EFI_CACHED_OFFSET 0xe000000000000000UL
-
-#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
-
-#endif // _ASM_IA64_XENSYSTEM_H
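
IS_VMM_ADDRESS() tests whether VA bits 60 and 59 differ, which holds exactly for 0xf000000000000000 .. 0xf7ffffffffffffff — the HYPERVISOR_VIRT_START..HYPERVISOR_VIRT_END range defined above. A self-contained sketch (macro restated under a hypothetical name):

#include <assert.h>

#define IS_VMM_ADDRESS_SKETCH(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)

static void vmm_address_examples(void)
{
    assert(IS_VMM_ADDRESS_SKETCH(0xf400000004000000UL));  /* KERNEL_START */
    assert(!IS_VMM_ADDRESS_SKETCH(0xf800000000000000UL)); /* HYPERVISOR_VIRT_END */
    assert(!IS_VMM_ADDRESS_SKETCH(0x8000000000000000UL)); /* guest VA */
}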
diff --git a/xen/include/asm-ia64/xentypes.h b/xen/include/asm-ia64/xentypes.h
deleted file mode 100644
index f898f4004c..0000000000
--- a/xen/include/asm-ia64/xentypes.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_IA64_XENTYPES_H
-#define _ASM_IA64_XENTYPES_H
-
-#ifndef __ASSEMBLY__
-typedef unsigned long ssize_t;
-typedef unsigned long size_t;
-typedef long long loff_t;
-
-typedef char bool_t;
-#define test_and_set_bool(b) xchg(&(b), 1)
-#define test_and_clear_bool(b) xchg(&(b), 0)
-
-#define BYTES_PER_LONG 8
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_IA64_XENTYPES_H */
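
test_and_set_bool() above is an atomic claim on a bool_t flag built on Xen's xchg(); a sketch of the typical once-only initialisation pattern it enables (the flag name is hypothetical, and this assumes xchg() as provided by Xen):

static bool_t init_done;

static void init_once_sketch(void)
{
    if (!test_and_set_bool(init_done)) {
        /* first caller wins and performs the one-time setup here */
    }
}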
diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
index 06e9884db4..9ec5afa243 100644
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -104,11 +104,4 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
struct hvm_intack hvm_vcpu_ack_pending_irq(struct vcpu *v,
struct hvm_intack intack);
-/*
- * Currently IA64 Xen doesn't support MSI. So for x86, we define this macro
- * to control the conditional compilation of some MSI-related functions.
- * This macro will be removed once IA64 has MSI support.
- */
-#define SUPPORT_MSI_REMAPPING 1
-
#endif /* __ASM_X86_HVM_IRQ_H__ */
diff --git a/xen/include/asm-x86/hvm/vioapic.h b/xen/include/asm-x86/hvm/vioapic.h
index f2c17535bf..ab4e07eb03 100644
--- a/xen/include/asm-x86/hvm/vioapic.h
+++ b/xen/include/asm-x86/hvm/vioapic.h
@@ -41,7 +41,7 @@
/* Direct registers. */
#define VIOAPIC_REG_SELECT 0x00
#define VIOAPIC_REG_WINDOW 0x10
-#define VIOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */
+#define VIOAPIC_REG_EOI 0x40
/* Indirect registers. */
#define VIOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index 1aa1412d89..22abd1fd80 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -82,7 +82,7 @@ typedef struct cpumask{ DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
extern unsigned int nr_cpu_ids;
-#if NR_CPUS > 4 * BITS_PER_LONG && !defined(__ia64__)
+#if NR_CPUS > 4 * BITS_PER_LONG
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
* not all bits may be allocated. */
extern unsigned int nr_cpumask_bits;
@@ -263,37 +263,6 @@ static inline const cpumask_t *cpumask_of(unsigned int cpu)
return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
}
-#if defined(__ia64__) /* XXX needs cleanup */
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
-#if NR_CPUS <= BITS_PER_LONG
-
-#define CPU_MASK_ALL \
-/*(cpumask_t)*/ { { \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-} }
-
-#else
-
-#define CPU_MASK_ALL \
-/*(cpumask_t)*/ { { \
- [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-} }
-
-#endif
-
-#define CPU_MASK_NONE \
-/*(cpumask_t)*/ { { \
- 0UL \
-} }
-
-#define CPU_MASK_CPU0 \
-/*(cpumask_t)*/ { { \
- [0] = 1UL \
-} }
-#endif /* __ia64__ */
-
#define cpumask_bits(maskp) ((maskp)->bits)
static inline int cpumask_scnprintf(char *buf, int len,
diff --git a/xen/include/xen/efi.h b/xen/include/xen/efi.h
index 647dafc473..231b6044fc 100644
--- a/xen/include/xen/efi.h
+++ b/xen/include/xen/efi.h
@@ -5,15 +5,11 @@
#include <xen/types.h>
#endif
-#if defined(__ia64__)
-# include_next <linux/efi.h>
+#if defined(__i386__)
+# define efi_enabled 0
#else
-
-# if defined(__i386__)
-# define efi_enabled 0
-# else
extern const bool_t efi_enabled;
-# endif
+#endif
#define EFI_INVALID_TABLE_ADDR (~0UL)
@@ -27,8 +23,6 @@ struct efi {
extern struct efi efi;
-#endif
-
#ifndef __ASSEMBLY__
union xenpf_efi_info;
diff --git a/xen/include/xen/elfcore.h b/xen/include/xen/elfcore.h
index d0fd4266e8..d3d3910f38 100644
--- a/xen/include/xen/elfcore.h
+++ b/xen/include/xen/elfcore.h
@@ -69,9 +69,6 @@ typedef struct {
unsigned long xen_phys_start;
unsigned long dom0_pfn_to_mfn_frame_list_list;
#endif
-#if defined(__ia64__)
- unsigned long dom0_mm_pgd_mfn;
-#endif
} crash_xen_info_t;
#endif /* __ELFCOREC_H__ */
diff --git a/xen/include/xen/hvm/irq.h b/xen/include/xen/hvm/irq.h
index b91c48de9b..5833369b58 100644
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -78,8 +78,6 @@ struct hvm_girq_dpci_mapping {
#define NR_LINK 4
#if defined(__i386__) || defined(__x86_64__)
# define NR_HVM_IRQS VIOAPIC_NUM_PINS
-#elif defined(__ia64__)
-# define NR_HVM_IRQS VIOSAPIC_NUM_PINS
#endif
/* Protected by domain's event_lock */
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 4f21e02818..6f7fbf73ef 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -35,11 +35,7 @@ extern bool_t iommu_debug;
extern bool_t amd_iommu_perdev_intremap;
/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
-#ifndef __ia64__
#define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
-#else
-#define iommu_use_hap_pt(d) 0
-#endif
extern struct rangeset *mmio_ro_ranges;
diff --git a/xen/include/xen/irq.h b/xen/include/xen/irq.h
index 61a155967a..852e038d5a 100644
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -95,37 +95,19 @@ int arch_init_one_irq_desc(struct irq_desc *);
#define irq_desc_initialized(desc) ((desc)->handler != NULL)
-#if defined(__ia64__)
-extern irq_desc_t irq_desc[NR_VECTORS];
-
-#define setup_irq(irq, action) \
- setup_irq_vector(irq_to_vector(irq), action)
-
-#define release_irq(irq) \
- release_irq_vector(irq_to_vector(irq))
-
-#define request_irq(irq, handler, irqflags, devname, devid) \
- request_irq_vector(irq_to_vector(irq), handler, irqflags, devname, devid)
-
-#elif defined(__arm__)
+#if defined(__arm__)
#define NR_IRQS 1024
#define nr_irqs NR_IRQS
extern irq_desc_t irq_desc[NR_IRQS];
-extern int setup_irq(unsigned int irq, struct irqaction *);
-extern void release_irq(unsigned int irq);
-extern int request_irq(unsigned int irq,
- void (*handler)(int, void *, struct cpu_user_regs *),
- unsigned long irqflags, const char * devname, void *dev_id);
+#endif
-#else
extern int setup_irq(unsigned int irq, struct irqaction *);
extern void release_irq(unsigned int irq);
extern int request_irq(unsigned int irq,
void (*handler)(int, void *, struct cpu_user_regs *),
unsigned long irqflags, const char * devname, void *dev_id);
-#endif
extern hw_irq_controller no_irq_type;
extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
diff --git a/xen/include/xen/libelf.h b/xen/include/xen/libelf.h
index 0ff8b5b71b..e8f6508698 100644
--- a/xen/include/xen/libelf.h
+++ b/xen/include/xen/libelf.h
@@ -23,7 +23,7 @@
#ifndef __XEN_LIBELF_H__
#define __XEN_LIBELF_H__
-#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__) || defined(__arm__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
#define XEN_ELF_LITTLE_ENDIAN
#else
#error define architectural endianness
diff --git a/xen/include/xen/symbols.h b/xen/include/xen/symbols.h
index 5215b9a697..37cf6bfef9 100644
--- a/xen/include/xen/symbols.h
+++ b/xen/include/xen/symbols.h
@@ -21,13 +21,9 @@ static void __check_printsym_format(const char *fmt, ...)
{
}
-/* ia64 and ppc64 use function descriptors, which contain the real address */
-#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
-#define print_fn_descriptor_symbol(fmt, addr) \
-do { \
- unsigned long *__faddr = (unsigned long*) addr; \
- print_symbol(fmt, __faddr[0]); \
-} while (0)
+#if 0
+#define print_fn_descriptor_symbol(fmt, addr) \
+ print_symbol(fmt, *(unsigned long *)addr)
#else
#define print_fn_descriptor_symbol(fmt, addr) print_symbol(fmt, addr)
#endif